sdart committed
Commit 99671bd
1 parent: 0fabe81

added two models

Files changed (2)
  1. app.py +28 -9
  2. model.py +13 -4
app.py CHANGED
@@ -1,38 +1,53 @@
 import gradio as gr
 
-def create_demo(process):
+def generateImage(prompt, n_prompt, modelName):
+
+    return models[modelName].process(prompt, n_prompt)
+
+
+
+
+def create_demo():
+
     with gr.Blocks() as demo:
         with gr.Column():
             prompt = gr.Textbox(label='Prompt')
             n_prompt = gr.Textbox(
                 label='Negative Prompt',
                 value=
-                'low quality, ugly, disfigured, deformed'
+                'ugly, disfigured, deformed'
             )
 
+            modelName = gr.Dropdown(choices = list(models.keys()),
+                                    label = "Model",
+                                    value=list(models.keys())[0])
+
             run_button = gr.Button('Run')
-            gr.Markdown("### Visit [Stable Diffusion Art](https://stable-diffusion-art.com/) for tutorials and resources. Read [Model license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) for Stable Diffusion v1.5.")
+            gr.Markdown("### [Stable Diffusion Art](https://stable-diffusion-art.com/) -- tutorials and resources. Read [Model license](https://huggingface.co/spaces/CompVis/stable-diffusion-license).")
             result = gr.Gallery(label='Output',
                                 show_label=False,
                                 elem_id='gallery').style(columns=1, rows=1, preview=True)
 
+
         inputs = [
             prompt,
-            n_prompt
+            n_prompt,
+            modelName,
         ]
+
         prompt.submit(
-            fn=process,
+            fn=generateImage,
             inputs=inputs,
             outputs=result
         )
         n_prompt.submit(
-            fn=process,
+            fn=generateImage,
             inputs=inputs,
             outputs=result
         )
 
         run_button.click(
-            fn=process,
+            fn=generateImage,
             inputs=inputs,
             outputs=result
         )
@@ -41,6 +56,10 @@ def create_demo(process):
 
 if __name__ == '__main__':
     from model import Model
-    model = Model()
-    demo = create_demo(model.process)
+    models = {
+        "Stable Diffusion v1.5": Model("runwayml/stable-diffusion-v1-5"),
+        "Realistic Vision v2.0": Model("SG161222/Realistic_Vision_V2.0"),
+        "Anything v3.0": Model("Linaqruf/anything-v3.0")
+    }
+    demo = create_demo()
     demo.queue().launch()
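The new app.py keeps a module-level models dict and lets generateImage dispatch on the dropdown's string value. The dict is only built under if __name__ == '__main__':, which works because generateImage looks the name up at call time when the script is run directly. Below is a minimal, self-contained sketch of that dropdown-dispatch pattern; the stub callables and names ("Model A", generate) are illustrative only and not part of the commit.

import gradio as gr

# Stand-ins for the Model instances; each stub just reports which "model" handled the prompt.
models = {
    "Model A": lambda prompt, n_prompt: f"A: {prompt} (avoiding: {n_prompt})",
    "Model B": lambda prompt, n_prompt: f"B: {prompt} (avoiding: {n_prompt})",
}

def generate(prompt, n_prompt, model_name):
    # Same lookup-and-call shape as generateImage() in app.py.
    return models[model_name](prompt, n_prompt)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label='Prompt')
    n_prompt = gr.Textbox(label='Negative Prompt')
    model_name = gr.Dropdown(choices=list(models.keys()),
                             label='Model',
                             value=list(models.keys())[0])
    output = gr.Textbox(label='Output')
    gr.Button('Run').click(fn=generate,
                           inputs=[prompt, n_prompt, model_name],
                           outputs=output)

if __name__ == '__main__':
    demo.launch()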
model.py CHANGED
@@ -6,6 +6,7 @@ from diffusers import DPMSolverMultistepScheduler
 import torch
 import PIL.Image
 import numpy as np
+import datetime
 
 # Check environment
 print(f"Is CUDA available: {torch.cuda.is_available()}")
@@ -19,11 +20,14 @@ device = "cuda"
 
 
 class Model:
-    def __init__(self):
-        modelID = "runwayml/stable-diffusion-v1-5"
+    def __init__(self, modelID):
+        #modelID = "runwayml/stable-diffusion-v1-5"
+
+        self.modelID = modelID
         self.pipe = StableDiffusionPipeline.from_pretrained(modelID, torch_dtype=torch.float16)
         self.pipe = self.pipe.to(device)
         self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
+        self.pipe.enable_xformers_memory_efficient_attention()
 
         #self.pipe = StableDiffusionPipeline.from_pretrained(modelID)
         #prompt = "a photo of an astronaut riding a horse on mars"
@@ -36,15 +40,20 @@ class Model:
                 num_images:int = 1,
                 num_steps:int = 20,
                 ):
-        seed = np.random.randint(0, np.iinfo(np.int64).max)
+        seed = np.random.randint(0, np.iinfo(np.int32).max)
        generator = torch.Generator(device).manual_seed(seed)
-        return self.pipe(prompt=prompt,
+        now = datetime.datetime.now()
+        print(now)
+        print(self.modelID)
+        with torch.inference_mode():
+            images = self.pipe(prompt=prompt,
                          negative_prompt=negative_prompt,
                          guidance_scale=guidance_scale,
                          num_images_per_prompt=num_images,
                          num_inference_steps=num_steps,
                          generator=generator).images
 
+        return images
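Two notes on the model.py changes. First, because app.py now builds all three Model instances at startup, all three pipelines are moved to the GPU and stay resident for the life of the Space. Second, enable_xformers_memory_efficient_attention() is called unconditionally and will raise if the xformers package is missing from the environment. A defensive variant of the constructor's pipeline setup is sketched below; load_pipeline is a hypothetical helper, not a function from this repo.

import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

def load_pipeline(model_id: str, device: str = "cuda"):
    # Same setup steps as Model.__init__ above, with the xformers call made optional.
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
    pipe = pipe.to(device)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    try:
        pipe.enable_xformers_memory_efficient_attention()
    except Exception as err:
        # xformers is optional; without it generation still runs, just with higher memory use.
        print(f"xformers not enabled: {err}")
    return pipe

Used the same way as the pipeline held by Model, e.g. pipe = load_pipeline("runwayml/stable-diffusion-v1-5").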