NagaSaiAbhinay committed on
Commit
e826573
1 Parent(s): ea3971b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -13
app.py CHANGED
@@ -2,9 +2,7 @@ from diffusers import DiffusionPipeline
2
  import gradio as gr
3
  import torch
4
  import math
5
-
6
- orig_start_prompt = "a photograph of an adult lion"
7
- orig_end_prompt = "a photograph of a lion cub"
8
 
9
  if torch.cuda.is_available():
10
  device = "cuda"
@@ -16,21 +14,22 @@ else:
16
  pipe = DiffusionPipeline.from_pretrained("kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=dtype, custom_pipeline='unclip_image_interpolation')
17
  pipe.to(device)
18
 
19
- def unclip_text_interpolation(
20
- start_prompt,
21
- end_prompt,
22
  steps,
23
  seed
24
  ):
25
  generator = torch.Generator()
26
  generator.manual_seed(seed)
27
 
28
- output = pipe(start_prompt, end_prompt, steps, enable_sequential_cpu_offload=False, generator=generator)
 
29
  return output.images
30
 
31
  inputs = [
32
- gr.Image(lines=2, default=orig_start_prompt, label="Start Prompt"),
33
- gr.Image(lines=2, default=orig_end_prompt, label="End Prompt"),
34
  gr.Slider(minimum=2, maximum=12, default=5, step=1, label="Steps"),
35
  gr.Number(0, label="Seed", precision=0)
36
  ]
@@ -40,15 +39,15 @@ output = gr.Gallery(
40
  ).style(grid=[2], height="auto")
41
 
42
  examples = [
43
- [orig_start_prompt, orig_end_prompt, 5, 42],
44
- ["a photo of a landscape in winter","a photo of a landscape in fall", 5, 20],
45
- ["a photo of a victorian house", "a photo of a modern house", 5, 20]
46
  ]
47
 
48
  title = "UnClip Image Interpolation Pipeline"
49
 
50
  demo_app = gr.Interface(
51
- fn=unclip_text_interpolation,
52
  inputs=inputs,
53
  outputs=output,
54
  title=title,
 
2
  import gradio as gr
3
  import torch
4
  import math
5
+ import PIL
 
 
6
 
7
  if torch.cuda.is_available():
8
  device = "cuda"
 
14
  pipe = DiffusionPipeline.from_pretrained("kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=dtype, custom_pipeline='unclip_image_interpolation')
15
  pipe.to(device)
16
 
17
+ def unclip_image_interpolation(
18
+ start_image,
19
+ end_image,
20
  steps,
21
  seed
22
  ):
23
  generator = torch.Generator()
24
  generator.manual_seed(seed)
25
 
26
+ images = [start_image, end_image]
27
+ output = pipe(images, steps, enable_sequential_cpu_offload=False, generator=generator)
28
  return output.images
29
 
30
  inputs = [
31
+ gr.Image(type="pil"),
32
+ gr.Image(type="pil"),
33
  gr.Slider(minimum=2, maximum=12, default=5, step=1, label="Steps"),
34
  gr.Number(0, label="Seed", precision=0)
35
  ]
 
39
  ).style(grid=[2], height="auto")
40
 
41
  examples = [
42
+ ["starry_night.jpg","dogs.jpg", 5, 20],
43
+ ["flowers.jpg", "dogs.jpg", 5, 42],
44
+ ["starry_night.jpg","flowers.jpg", 6, 9011]
45
  ]
46
 
47
  title = "UnClip Image Interpolation Pipeline"
48
 
49
  demo_app = gr.Interface(
50
+ fn=unclip_image_interpolation,
51
  inputs=inputs,
52
  outputs=output,
53
  title=title,