Omnibus committed on
Commit 4757914
1 Parent(s): 044a929

Update app.py

Files changed (1):
  1. app.py +83 -25

app.py CHANGED
@@ -9,6 +9,7 @@ import base64
 import torch
 from diffusers import AutoPipelineForImage2Image
 from diffusers.utils import make_image_grid, load_image
+import uuid
 
 base_url=f'https://omnibus-top-20-img-img.hf.space/file='
 loaded_model=[]
@@ -23,31 +24,83 @@ print (loaded_model)
 pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None, variant="fp16", use_safetensors=True).to("cpu")
 pipeline.unet = torch.compile(pipeline.unet)
 
+grid_wide=10
+
+
+def get_concat_h_cut(in1, in2):
+    # join two images side by side, cropping to the shorter height
+    im1=Image.open(in1)
+    im2=Image.open(in2)
+    dst = Image.new('RGB', (im1.width + im2.width,
+                            min(im1.height, im2.height)))
+    dst.paste(im1, (0, 0))
+    dst.paste(im2, (im1.width, 0))
+    return dst
+
+
+def get_concat_v_cut(in1, in2):
+    # join two images top to bottom, cropping to the narrower width
+    im1=Image.open(in1)
+    im2=Image.open(in2)
+    dst = Image.new(
+        'RGB', (min(im1.width, im2.width), im1.height + im2.height))
+    dst.paste(im1, (0, 0))
+    dst.paste(im2, (0, im1.height))
+    return dst
+
+
 def load_model(model_drop):
     pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32, use_safetensors=True)
 
-def run_dif(prompt,im_path,model_drop,cnt,strength,guidance,infer):
+def run_dif(prompt,im_path,model_drop,cnt,strength,guidance,infer,im_height,im_width):
+    uid=uuid.uuid4()
     print(f'im_path:: {im_path}')
     print(f'im_path0:: {im_path.root[0]}')
     print(f'im_path0.image.path:: {im_path.root[0].image.path}')
     out_box=[]
-
+    im_height=int(im_height)
+    im_width=int(im_width)
     for i,ea in enumerate(im_path.root):
-        print(f'root::{im_path.root[i]}')
-        #print(f'ea:: {ea}')
-        #print(f'impath:: {im_path.path}')
-        url = base_url+im_path.root[i].image.path
-        print(url)
-        #init_image = load_image(url)
-        init_image=load_image(url)
-        #prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
-
-        # pass prompt and image to pipeline
-        #image = pipeline(prompt, image=init_image, strength=0.8,guidance_scale=8.0,negative_prompt=negative_prompt,num_inference_steps=50).images[0]
-        image = pipeline(prompt, image=init_image, strength=float(strength),guidance_scale=float(guidance),num_inference_steps=int(infer)).images[0]
-        #make_image_grid([init_image, image], rows=1, cols=2)
-        out_box.append(image)
-        yield out_box,""
+        for hh in range(im_height//grid_wide):  # floor division: range() needs an int
+            for b in range(im_width//grid_wide):
+                print(f'root::{im_path.root[i]}')
+                #print(f'ea:: {ea}')
+                #print(f'impath:: {im_path.path}')
+                url = base_url+im_path.root[i].image.path
+                print(url)
+                #init_image = load_image(url)
+                init_image=load_image(url)
+                #prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
+
+                # pass prompt and image to pipeline
+                #image = pipeline(prompt, image=init_image, strength=0.8,guidance_scale=8.0,negative_prompt=negative_prompt,num_inference_steps=50).images[0]
+                image = pipeline(prompt, image=init_image, strength=float(strength),guidance_scale=float(guidance),num_inference_steps=int(infer)).images[0]
+                #make_image_grid([init_image, image], rows=1, cols=2)
+
+                # save each tile to disk so the concat helpers (which Image.open a path) can read it back
+                tile=f'{uid}-{len(out_box)}.png'
+                image.save(tile)
+                out_box.append(tile)
+
+    if out_box:
+        if len(out_box)>1:
+            im_roll = get_concat_v_cut(f'{out_box[0]}',f'{out_box[1]}')
+            im_roll.save(f'comb-{uid}-tmp.png')
+            for i in range(2,len(out_box)):
+                im_roll = get_concat_v_cut(f'comb-{uid}-tmp.png',f'{out_box[i]}')
+                im_roll.save(f'comb-{uid}-tmp.png')
+            out = f'comb-{uid}-tmp.png'
+        else:
+            tmp_im = Image.open(out_box[0])
+            tmp_im.save(f'comb-{uid}-tmp.png')
+            out = f'comb-{uid}-tmp.png'
+
+    yield out,""
 
 
@@ -155,14 +208,14 @@ def load_im(img):
     im_box=[]
     im = Image.open(img)
     width, height = im.size
-    new_w=int(width/10)
+    new_w=int(width/grid_wide)
     new_h=new_w
     w=0
     h=0
     newsize=(512,512)
     for i in range(int(height/new_h)):
         print(i)
-        for b in range(10):
+        for b in range(grid_wide):
             print(b)
             # Setting the points for cropped image
             left = w
@@ -180,7 +233,7 @@ def load_im(img):
         #yield im_box,[]
         h+=new_h
         w=0
-    yield im_box,im_box
+    yield im_box,im_box,height,width
 with gr.Blocks(css=css) as app:
     with gr.Row():
         with gr.Column():
@@ -198,11 +251,16 @@ with gr.Blocks(css=css) as app:
     with gr.Row():
         model_drop=gr.Dropdown(label="Models", choices=models, type='index', value=models[0])
         cnt = gr.Number(value=1)
+
     out_html=gr.HTML()
-    outp=gr.Gallery(columns=10)
-    fingal=gr.Gallery(columns=10)
-    im_list=gr.Textbox()
-    im_btn.click(load_im,inp_im,[outp,im_list])
-    go_btn = btn.click(run_dif,[inp,outp,model_drop,cnt,strength,guidance,infer],[fingal,out_html])
+    outp=gr.Gallery(columns=grid_wide)
+    #fingal=gr.Gallery(columns=grid_wide)
+    fin=gr.Image()
+    im_height=gr.Number()
+    im_width=gr.Number()
+
+    im_list=gr.Textbox(visible=False)
+    im_btn.click(load_im,inp_im,[outp,im_list,im_height,im_width])
+    go_btn = btn.click(run_dif,[inp,outp,model_drop,cnt,strength,guidance,infer,im_height,im_width],[fin,out_html])
     stop_btn.click(None,None,None,cancels=[go_btn])
 app.queue().launch()
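
For reference, the pattern the new helpers implement is a running vertical concat: stitch the first two tiles, then fold each remaining tile onto the saved result. A minimal standalone sketch under that reading; the tile file names here are illustrative, not from the commit:

from PIL import Image

def get_concat_v_cut(in1, in2):
    # open both inputs by path and stack them vertically,
    # cropping to the narrower width (the "cut")
    im1, im2 = Image.open(in1), Image.open(in2)
    dst = Image.new('RGB', (min(im1.width, im2.width), im1.height + im2.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (0, im1.height))
    return dst

tiles = ['tile-0.png', 'tile-1.png', 'tile-2.png']  # illustrative tile files
strip = get_concat_v_cut(tiles[0], tiles[1])
strip.save('strip-tmp.png')
for path in tiles[2:]:
    strip = get_concat_v_cut('strip-tmp.png', path)  # re-read the running strip, as run_dif does
    strip.save('strip-tmp.png')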
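The same grid_wide constant parameterizes the cropping in load_im, which slices the upload into square tiles, grid_wide per row, and now also yields the source height and width back to the UI for run_dif. A sketch of that arithmetic with an illustrative input size:

from PIL import Image

grid_wide = 10
im = Image.open('input.png')        # illustrative path
width, height = im.size             # say 1024 x 768
new_w = new_h = width // grid_wide  # 102 px square tiles, grid_wide per row
tiles = []
h = 0
for _ in range(height // new_h):    # 7 full rows fit in 768 px
    w = 0
    for _ in range(grid_wide):
        box = (w, h, w + new_w, h + new_h)
        tiles.append(im.crop(box).resize((512, 512)))  # load_im upsizes each tile to 512x512
        w += new_w
    h += new_h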
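Finally, the rewired click handlers follow a common Gradio pattern: auxiliary Number components carry values from one callback (load_im) to the next (run_dif). A minimal sketch of that pattern, with hypothetical stand-in callbacks rather than the app's own:

import gradio as gr
from PIL import Image

def measure(path):
    # stand-in for load_im: returns values into the Number components
    w, h = Image.open(path).size
    return h, w

def use_dims(h, w):
    # stand-in for run_dif: consumes the carried dimensions
    return f'source is {int(w)}x{int(h)}'

with gr.Blocks() as demo:
    inp_im = gr.Image(type='filepath')
    im_height = gr.Number(visible=False)  # carries height between callbacks
    im_width = gr.Number(visible=False)   # carries width between callbacks
    out_html = gr.HTML()
    im_btn = gr.Button('Load')
    go_btn = gr.Button('Run')
    im_btn.click(measure, inp_im, [im_height, im_width])
    go_btn.click(use_dims, [im_height, im_width], out_html)

demo.launch()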