NagaSaiAbhinay committed on
Commit
3edd8dd
1 Parent(s): 929d65e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -22
app.py CHANGED
@@ -3,28 +3,36 @@ import gradio as gr
3
  import torch
4
  import math
5
 
6
- orig_start_prompt = "A photograph of an adult Lion"
7
- orig_end_prompt = "A photograph of a Lion cub"
8
- model_list = ["kakaobrain/karlo-v1-alpha"]
 
 
 
 
 
 
 
 
 
9
 
10
  def unclip_text_interpolation(
11
- model_path,
12
  start_prompt,
13
  end_prompt,
14
- steps
 
15
  ):
16
-
17
- pipe = DiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.bfloat16, custom_pipeline='unclip_text_interpolation')
18
-
19
- images = pipe(start_prompt, end_prompt, steps, enable_sequential_cpu_offload=False)
20
 
21
- return images
 
22
 
23
  inputs = [
24
- gr.Dropdown(model_list, value=model_list[0], label="Model"),
25
- gr.inputs.Textbox(lines=5, default=orig_start_prompt, label="Start Prompt"),
26
- gr.inputs.Textbox(lines=1, default=orig_end_prompt, label="End Prompt"),
27
- gr.inputs.Slider(minimum=2, maximum=12, default=5, step=1, label="Steps")
28
  ]
29
 
30
  output = gr.Gallery(
@@ -32,19 +40,15 @@ output = gr.Gallery(
32
  ).style(grid=[2], height="auto")
33
 
34
  examples = [
35
- ["kakaobrain/karlo-v1-alpha", orig_start_prompt, orig_end_prompt, 6],
 
 
36
  ]
37
 
38
  title = "UnClip Text Interpolation Pipeline"
39
- description = """<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
40
- <br/>
41
- <a href="https://huggingface.co/spaces/kadirnar/stable-diffusion-2-infinite-zoom-out?duplicate=true">
42
- <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
43
- <p/>"""
44
 
45
  demo_app = gr.Interface(
46
  fn=unclip_text_interpolation,
47
- description=description,
48
  inputs=inputs,
49
  outputs=output,
50
  title=title,
@@ -52,4 +56,4 @@ demo_app = gr.Interface(
52
  examples=examples,
53
  cache_examples=True
54
  )
55
- demo_app.launch(debug=True, enable_queue=True)
 
3
  import torch
4
  import math
5
 
6
+ orig_start_prompt = "a photograph of an adult lion"
7
+ orig_end_prompt = "a photograph of a lion cub"
8
+
9
+ if torch.cuda.is_available():
10
+ device = "cuda"
11
+ dtype = torch.float16
12
+ else:
13
+ device = "cpu"
14
+ dtype = torch.bfloat16
15
+
16
+ pipe = DiffusionPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=dtype, custom_pipeline='unclip_text_interpolation')
17
+ pipe.to(device)
18
 
19
  def unclip_text_interpolation(
 
20
  start_prompt,
21
  end_prompt,
22
+ steps,
23
+ seed
24
  ):
25
+ generator = torch.Generator()
26
+ generator.manual_seed(seed)
 
 
27
 
28
+ output = pipe(start_prompt, end_prompt, steps, enable_sequential_cpu_offload=False, generator=generator)
29
+ return output.images
30
 
31
  inputs = [
32
+ gr.Textbox(lines=2, default=orig_start_prompt, label="Start Prompt"),
33
+ gr.Textbox(lines=2, default=orig_end_prompt, label="End Prompt"),
34
+ gr.Slider(minimum=2, maximum=12, default=5, step=1, label="Steps"),
35
+ gr.Number(0, label="Seed", precision=0)
36
  ]
37
 
38
  output = gr.Gallery(
 
40
  ).style(grid=[2], height="auto")
41
 
42
  examples = [
43
+ [orig_start_prompt, orig_end_prompt, 5, 42],
44
+ ["a photo of a landscape in winter","a photo of a landscape in fall", 5, 20],
45
+ ["a photo of a victorian house", "a photo of a modern house", 5, 20]
46
  ]
47
 
48
  title = "UnClip Text Interpolation Pipeline"
 
 
 
 
 
49
 
50
  demo_app = gr.Interface(
51
  fn=unclip_text_interpolation,
 
52
  inputs=inputs,
53
  outputs=output,
54
  title=title,
 
56
  examples=examples,
57
  cache_examples=True
58
  )
59
+ demo_app.launch(debug=True, enable_queue=True)