dg845 committed
Commit
5a6a815
1 Parent(s): 233ef10

Update app.py


Switch to diffusers testing

Files changed (1)
  1. app.py +89 -89
app.py CHANGED
@@ -1,109 +1,109 @@
 import gradio as gr
 
-# import torch
+import torch
 
-# from diffusers import UniDiffuserPipeline
+from diffusers import UniDiffuserPipeline
 
 
-# device = 'cuda' if torch.cuda.is_available() else 'cpu'
-# model_id = "dg845/unidiffuser-diffusers"
-# # model_id = "dg845/unidiffuser-diffusers-v0"
-# pipeline = UniDiffuserPipeline.from_pretrained(
-#     model_id,
-# )
-# pipeline.to(device)
-
-
-# def convert_to_none(s):
-#     if s:
-#         return s
-#     else:
-#         return None
-
-
-# def set_mode(mode):
-#     if mode == "joint":
-#         pipeline.set_joint_mode()
-#     elif mode == "text2img":
-#         pipeline.set_text_to_image_mode()
-#     elif mode == "img2text":
-#         pipeline.set_image_text_mode()
-#     elif mode == "text":
-#         pipeline.set_text_mode()
-#     elif mode == "img":
-#         pipeline.set_image_mode()
-
-
-# def sample(mode, prompt, image, num_inference_steps, guidance_scale, seed):
-#     set_mode(mode)
-#     prompt = convert_to_none(prompt)
-#     image = convert_to_none(image)
-#     generator = torch.Generator(device=device).manual_seed(seed)
-#     output_sample = pipeline(
-#         prompt=prompt,
-#         image=image,
-#         num_inference_steps=num_inference_steps,
-#         guidance_scale=guidance_scale,
-#         generator=generator,
-#     )
-#     sample_image = None
-#     sample_text = ""
-#     if output_sample.images is not None:
-#         sample_image = output_sample.images[0]
-#     if output_sample.text is not None:
-#         sample_text = output_sample.text[0]
-#     return sample_image, sample_text
-
-
-# iface = gr.Interface(
-#     fn=sample,
-#     inputs=[
-#         gr.Textbox(value="", label="Generation Task"),
-#         gr.Textbox(value="", label="Conditioning prompt"),
-#         gr.Image(value=None, label="Conditioning image", type="pil"),
-#         gr.Number(value=20, label="Num Inference Steps", precision=0),
-#         gr.Number(value=8.0, label="Guidance Scale"),
-#         gr.Number(value=0, label="Seed", precision=0),
-#     ],
-#     outputs=[
-#         gr.Image(label="Sample image"),
-#         gr.Textbox(label="Sample text"),
-#     ],
-# )
-# iface.launch()
-
-# from unidiffuser.sample_v0 import sample
-# from unidiffuser.sample_v0_test import sample
-# from unidiffuser.sample_v1 import sample
-from unidiffuser.sample_v1_test import sample
-
-
-def predict(mode, prompt, image, sample_steps, guidance_scale, seed):
-    output_images, output_text = sample(
-        mode, prompt, image, sample_steps=sample_steps, scale=guidance_scale, seed=seed,
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+model_id = "dg845/unidiffuser-diffusers"
+# model_id = "dg845/unidiffuser-diffusers-v0"
+pipeline = UniDiffuserPipeline.from_pretrained(
+    model_id,
+)
+pipeline.to(device)
+
+
+def convert_to_none(s):
+    if s:
+        return s
+    else:
+        return None
+
+
+def set_mode(mode):
+    if mode == "joint":
+        pipeline.set_joint_mode()
+    elif mode == "text2img":
+        pipeline.set_text_to_image_mode()
+    elif mode == "img2text":
+        pipeline.set_image_text_mode()
+    elif mode == "text":
+        pipeline.set_text_mode()
+    elif mode == "img":
+        pipeline.set_image_mode()
+
+
+def sample(mode, prompt, image, num_inference_steps, guidance_scale, seed):
+    set_mode(mode)
+    prompt = convert_to_none(prompt)
+    image = convert_to_none(image)
+    generator = torch.Generator(device=device).manual_seed(seed)
+    output_sample = pipeline(
+        prompt=prompt,
+        image=image,
+        num_inference_steps=num_inference_steps,
+        guidance_scale=guidance_scale,
+        generator=generator,
     )
     sample_image = None
     sample_text = ""
-    if output_images is not None:
-        sample_image = output_images[0]
-    if output_text is not None:
-        sample_text = output_text[0]
+    if output_sample.images is not None:
+        sample_image = output_sample.images[0]
+    if output_sample.text is not None:
+        sample_text = output_sample.text[0]
     return sample_image, sample_text
-
+
 
 iface = gr.Interface(
-    fn=predict,
+    fn=sample,
     inputs=[
         gr.Textbox(value="", label="Generation Task"),
         gr.Textbox(value="", label="Conditioning prompt"),
-        gr.Image(value=None, label="Conditioning image", type="filepath"),
-        gr.Number(value=50, label="Num Inference Steps", precision=0),
-        gr.Number(value=7.0, label="Guidance Scale"),
-        gr.Number(value=1234, label="Seed", precision=0),
+        gr.Image(value=None, label="Conditioning image", type="pil"),
+        gr.Number(value=20, label="Num Inference Steps", precision=0),
+        gr.Number(value=8.0, label="Guidance Scale"),
+        gr.Number(value=0, label="Seed", precision=0),
     ],
     outputs=[
         gr.Image(label="Sample image"),
         gr.Textbox(label="Sample text"),
     ],
 )
-iface.launch()
+iface.launch()
+
+# from unidiffuser.sample_v0 import sample
+# from unidiffuser.sample_v0_test import sample
+# from unidiffuser.sample_v1 import sample
+# from unidiffuser.sample_v1_test import sample
+
+
+# def predict(mode, prompt, image, sample_steps, guidance_scale, seed):
+#     output_images, output_text = sample(
+#         mode, prompt, image, sample_steps=sample_steps, scale=guidance_scale, seed=seed,
+#     )
+#     sample_image = None
+#     sample_text = ""
+#     if output_images is not None:
+#         sample_image = output_images[0]
+#     if output_text is not None:
+#         sample_text = output_text[0]
+#     return sample_image, sample_text
+
+
+# iface = gr.Interface(
+#     fn=predict,
+#     inputs=[
+#         gr.Textbox(value="", label="Generation Task"),
+#         gr.Textbox(value="", label="Conditioning prompt"),
+#         gr.Image(value=None, label="Conditioning image", type="filepath"),
+#         gr.Number(value=50, label="Num Inference Steps", precision=0),
+#         gr.Number(value=7.0, label="Guidance Scale"),
+#         gr.Number(value=1234, label="Seed", precision=0),
+#     ],
+#     outputs=[
+#         gr.Image(label="Sample image"),
+#         gr.Textbox(label="Sample text"),
+#     ],
+# )
+# iface.launch()
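
For reference, the snippet below is a minimal sketch of the diffusers code path this commit switches to, exercised directly (text-to-image mode) rather than through the Gradio interface. It only uses the checkpoint id and calls that appear in the new app.py; the prompt, seed, and output filename are illustrative placeholders, not part of the commit.

# Minimal usage sketch (not part of the commit): text-to-image sampling with the
# diffusers pipeline that app.py now loads. Checkpoint id, mode switch, and sampling
# arguments mirror the new app.py; the prompt, seed, and filename are placeholders.
import torch
from diffusers import UniDiffuserPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = UniDiffuserPipeline.from_pretrained("dg845/unidiffuser-diffusers")
pipeline.to(device)

pipeline.set_text_to_image_mode()  # what app.py's set_mode() does for mode == "text2img"
generator = torch.Generator(device=device).manual_seed(0)
output = pipeline(
    prompt="an astronaut riding a horse",
    num_inference_steps=20,
    guidance_scale=8.0,
    generator=generator,
)
output.images[0].save("sample.png")  # output.images holds PIL images in this mode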