import gradio as gr
from models import models
from PIL import Image
import requests
import uuid
import io
import base64

import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image

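# file= endpoint of this Space; gallery tile paths are turned back into fetchable URLs with it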
base_url = 'https://omnibus-top-20-img-img.hf.space/file='
# load each hosted model endpoint; skip any that fail to load
loaded_model = []
for model in models:
    try:
        loaded_model.append(gr.load(f'models/{model}'))
    except Exception as e:
        print(e)
print(loaded_model)

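# base img2img pipeline (Stable Diffusion 1.5) loaded once on CPU and reused by run_dif below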
pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None, variant="fp16", use_safetensors=True).to("cpu")
pipeline.unet = torch.compile(pipeline.unet)

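# tiles per row when slicing the source image; also used as the gallery column count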
grid_wide=10



def get_concat_h_cut(im1, im2):
    # paste two PIL images side by side, cropping to the shorter height
    dst = Image.new('RGB', (im1.width + im2.width,
                            min(im1.height, im2.height)))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst


def get_concat_v_cut(in1, in2):
    # accept either a file path or a PIL image for each input
    im1 = in1 if isinstance(in1, Image.Image) else Image.open(in1)
    im2 = in2 if isinstance(in2, Image.Image) else Image.open(in2)
    # stack the two images vertically, cropping to the narrower width
    dst = Image.new(
        'RGB', (min(im1.width, im2.width), im1.height + im2.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (0, im1.height))
    return dst






def load_model(model_drop):
    # note: currently unused; builds a fresh pipeline locally and does not replace the global one
    pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32, use_safetensors=True)
    return pipeline

def run_dif(prompt, im_path, model_drop, cnt, strength, guidance, infer, im_height, im_width):
    uid = uuid.uuid4()
    print(f'im_path:: {im_path}')
    out_box = []
    im_height = int(im_height)
    im_width = int(im_width)

    for ea in im_path.root:
        for hh in range(int(im_height / grid_wide)):
            for b in range(int(im_width / grid_wide)):
                # fetch the gallery tile back through the Space's file endpoint
                url = base_url + ea.image.path
                print(url)
                init_image = load_image(url)

                # run img2img on the tile with the user-supplied settings
                image = pipeline(prompt, image=init_image, strength=float(strength), guidance_scale=float(guidance), num_inference_steps=int(infer)).images[0]
                out_box.append(image)

                # stitch everything generated so far into one vertical strip
                if len(out_box) > 1:
                    im_roll = get_concat_v_cut(out_box[0], out_box[1])
                    im_roll.save(f'comb-{uid}-tmp.png')
                    for j in range(2, len(out_box)):
                        im_roll = get_concat_v_cut(f'comb-{uid}-tmp.png', out_box[j])
                        im_roll.save(f'comb-{uid}-tmp.png')
                else:
                    out_box[0].save(f'comb-{uid}-tmp.png')
                out = f'comb-{uid}-tmp.png'

                yield out, ""




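# alternative text-to-image paths using the gr.load'ed model endpoints; not wired into the UI below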
def run_dif_old(out_prompt, model_drop, cnt):
    # query the selected hosted model `cnt` times, padding the prompt with an extra space each call
    p_seed = ""
    out_box = []
    out_html = ""
    for i in range(int(cnt)):
        p_seed += " "
        try:
            model = loaded_model[int(model_drop)]
            out_img = model(out_prompt + p_seed)
            print(out_img)
            out_box.append(out_img)
        except Exception as e:
            print(e)
            out_html = str(e)
        yield out_box, out_html

def run_dif_og(out_prompt, model_drop, cnt):
    out_box = []
    out_html = ""
    for i in range(int(cnt)):
        try:
            model = loaded_model[int(model_drop)]
            out_img = model(out_prompt)
            print(out_img)
            url = f'https://omnibus-top-20.hf.space/file={out_img}'
            print(url)
            r = requests.get(url, stream=True)

            if r.status_code == 200:
                # embed the returned image as base64 HTML and keep a PIL copy for the gallery
                img_buffer = io.BytesIO(r.content)
                str_equivalent_image = base64.b64encode(img_buffer.getvalue()).decode()
                img_tag = "<img src='data:image/png;base64," + str_equivalent_image + "'/>"
                out_html += f"<div class='img_class'><a href='https://huggingface.co/{models[int(model_drop)]}'>{models[int(model_drop)]}</a><br>" + img_tag + "</div>"
                out = Image.open(io.BytesIO(r.content))
                out_box.append(out)
            html_out = "<div class='grid_class'>" + out_html + "</div>"
            yield out_box, html_out
        except Exception as e:
            out_html += str(e)
            html_out = "<div class='grid_class'>" + out_html + "</div>"
            yield out_box, html_out

def thread_dif(out_prompt, mod):
    out_box = []
    out_html = ""
    try:
        model = loaded_model[int(mod)]
        out_img = model(out_prompt)
        print(out_img)
        url = f'https://omnibus-top-20.hf.space/file={out_img}'
        print(url)
        r = requests.get(url, stream=True)

        if r.status_code == 200:
            # decode the returned file into a PIL image for the gallery
            out = Image.open(io.BytesIO(r.content))
            out_box.append(out)
        else:
            out_html = str(r.status_code)
        html_out = "<div class='grid_class'>" + out_html + "</div>"
        return out_box, html_out
    except Exception as e:
        out_html = str(e)
        html_out = "<div class='grid_class'>" + out_html + "</div>"
        return out_box, html_out


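# minimal flexbox styling for the HTML results grid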
css="""
.grid_class{
display:flex;
height:100%;
}
.img_class{
min-width:200px;
}

"""

def load_im(img):
    # slice the input image into grid_wide tiles per row, resize each tile to
    # 512x512, and return the tiles plus the original image dimensions
    im_box = []
    im = Image.open(img)
    width, height = im.size
    new_w = int(width / grid_wide)
    new_h = new_w
    w = 0
    h = 0
    newsize = (512, 512)
    for i in range(int(height / new_h)):
        for b in range(grid_wide):
            # corner points of the current tile
            left = w
            top = h
            right = left + new_w
            bottom = top + new_h

            # crop does not modify the original image
            im1 = im.crop((left, top, right, bottom))
            im1 = im1.resize(newsize)

            im_box.append(im1)
            w += new_w
        h += new_h
        w = 0
    yield im_box, im_box, height, width
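# UI: prompt and diffusion settings on the left, source image and grid slicer on the right;
# the tiles fill the gallery and the stitched result streams into the image output below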
with gr.Blocks(css=css) as app:
    with gr.Row():
        with gr.Column():
            inp=gr.Textbox(label="Prompt")
            strength=gr.Slider(label="Strength",minimum=0,maximum=1,step=0.1,value=0.2)
            guidance=gr.Slider(label="Guidance",minimum=0,maximum=10,step=0.1,value=8.0)
            infer=gr.Slider(label="Inference Steps",minimum=0,maximum=50,step=1,value=10)
            
            with gr.Row():
                btn=gr.Button()
                stop_btn=gr.Button("Stop")
        with gr.Column():
            inp_im=gr.Image(type='filepath')
            im_btn=gr.Button("Image Grid")
    with gr.Row():
        model_drop=gr.Dropdown(label="Models", choices=models, type='index', value=models[0])
        cnt = gr.Number(value=1)
        
    out_html=gr.HTML()
    outp=gr.Gallery(columns=grid_wide)
    fin=gr.Image()
    im_height=gr.Number()
    im_width=gr.Number()
    
    im_list=gr.Textbox(visible=False)    
    im_btn.click(load_im,inp_im,[outp,im_list,im_height,im_width])
    go_btn = btn.click(run_dif,[inp,outp,model_drop,cnt,strength,guidance,infer,im_height,im_width],[fin,out_html])
    stop_btn.click(None,None,None,cancels=[go_btn])
app.queue().launch()