jayparmr committed
Commit 4c9bb7b
1 Parent(s): 0bec97d

Update app.py

Files changed (1)
  1. app.py +40 -38
app.py CHANGED
@@ -1,21 +1,42 @@
+
 import gradio as gr
 import torch
 # from diffusers import DiffusionPipeline
 from diffusers import StableDiffusionPipeline
+from diffusers.models import AutoencoderKL
+from diffusers import StableDiffusionPipeline
+
+
+
+
+def generate(prompt, negative_prompts, samples, steps,scale, seed, width, height):
 
-def generate(prompt, negative_prompts, samples, steps,scale, seed):
     pipeline = StableDiffusionPipeline.from_pretrained("jayparmr/icbinp", use_auth_token="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", torch_dtype=torch.float16)
     pipeline.to("cuda")
-    # return pipeline(prompt).images[0]
-
+
     generator = torch.Generator(device="cuda").manual_seed(int(seed))
+
     images_list = pipeline(
         [prompt] * samples,
         negative_prompt= [negative_prompts] * samples,
         num_inference_steps=steps,
         guidance_scale=scale,
         generator=generator,
+        width=width,
+        height=height
     )
+
+    # vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae")
+    # pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", vae=vae).to("cuda")
+
+
+    # images_list = pipe(
+    #     [prompt] * samples,
+    #     negative_prompt= [negative_prompts] * samples,
+    #     num_inference_steps=steps,
+    #     guidance_scale=scale
+    # )
+    print("stop gen")
     images = []
     print(images_list)
     for i, image in enumerate(images_list["images"]):
@@ -27,45 +48,34 @@ block = gr.Blocks()
 with block:
     with gr.Group():
         with gr.Box():
-            with gr.Row().style(mobile_collapse=False, equal_height=True):
+            with gr.Row().style(equal_height=True):
                 text = gr.Textbox(
                     label="Enter your prompt",
                     show_label=False,
                     max_lines=1,
                     placeholder="Enter your prompt",
-                ).style(
-                    border=(True, False, True, True),
-                    rounded=(True, False, False, True),
-                    container=False,
                 )
                 negative_text = gr.Textbox(
+                    value="",
                     label="Enter your negative prompt",
                     show_label=False,
                     max_lines=1,
                     placeholder="Enter your negative prompt",
-                ).style(
-                    border=(True, False, True, True),
-                    rounded=(True, False, False, True),
-                    container=False,
-                )
-                btn = gr.Button("Generate image").style(
-                    margin=False,
-                    rounded=(False, True, True, False),
                 )
-                # with gr.Row().style(mobile_collapse=False, equal_height=True):
-                #     btn = gr.Button("Generate image").style(
-                #         margin=False,
-                #         rounded=(False, True, True, False),
-                #     )
+                btn = gr.Button("Generate image")
         gallery = gr.Gallery(
-            label="Generated images", show_label=False, elem_id="gallery"
-        ).style(grid=[2], height="auto")
+            label="Generated images", show_label=False, elem_id="gallery", width = 768
+        )
+        # gallery = gr.Image(
+        #     label="Generated images", elem_id="gallery", width = 768, height = 536
+        # )
 
-        # advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
 
         with gr.Row(elem_id="advanced-options"):
             samples = gr.Slider(label="Images", minimum=1, maximum=4, value=1, step=1)
-            steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=25, step=1)
+            steps = gr.Slider(label="Steps", minimum=1, maximum=500, value=100, step=1)
+            width = gr.Slider(label="width", minimum=1, maximum=2048, value=512, step=1)
+            height = gr.Slider(label="height", minimum=1, maximum=2048, value=512, step=1)
             scale = gr.Slider(
                 label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1
             )
@@ -73,18 +83,10 @@ with block:
                 label="Seed",
                 minimum=0,
                 maximum=2147483647,
-                step=1,
-                randomize=True,
+                step=1
             )
-        text.submit(generate, inputs=[text,negative_text, samples, steps, scale, seed], outputs=gallery)
-        btn.click(generate, inputs=[text,negative_text, samples, steps, scale, seed], outputs=gallery)
-        # advanced_button.click(
-        #     None,
-        #     [],
-        #     text,
-        #     )
+        text.submit(generate, inputs=[text,negative_text, samples, steps, scale, seed, width, height], outputs=gallery)
+        btn.click(generate, inputs=[text,negative_text, samples, steps, scale, seed, width, height], outputs=gallery)
 
-block.launch()
-
-# iface = gr.Interface(fn=generate, inputs="text", outputs="image")
-# iface.launch()
+
+block.launch()
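
A minimal standalone sketch of the pipeline call this commit moves to, i.e. width and height passed straight through to StableDiffusionPipeline. The prompt text, seed, 512x512 size, and CUDA device below are illustrative assumptions, as is access to the jayparmr/icbinp checkpoint; any other Stable Diffusion 1.x checkpoint id would be called the same way.

import torch
from diffusers import StableDiffusionPipeline

# Load the checkpoint in fp16 and move it to the GPU, as the app does.
pipeline = StableDiffusionPipeline.from_pretrained(
    "jayparmr/icbinp", torch_dtype=torch.float16
)
pipeline.to("cuda")

# Fixed seed, mirroring generate(); width/height mirror the new slider defaults.
generator = torch.Generator(device="cuda").manual_seed(1234)
result = pipeline(
    ["a watercolor painting of a lighthouse"],  # illustrative prompt
    negative_prompt=[""],
    num_inference_steps=100,
    guidance_scale=7.5,
    generator=generator,
    width=512,
    height=512,
)
result.images[0].save("out.png")  # result.images is a list of PIL images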