AbstractQbit committed
Commit fc2e79d · 1 parent: 355328c

Hw 4 solution

Files changed (1): app.py (+30, -5)
app.py CHANGED
@@ -6,24 +6,32 @@ import random
 from diffusers import DiffusionPipeline
 import torch
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
+device = "cuda" if torch.cuda.is_available() \
+    else "xpu" if torch.xpu.is_available() \
+    else "cpu"
+current_model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
 
-if torch.cuda.is_available():
+if torch.cuda.is_available() or torch.xpu.is_available():
     torch_dtype = torch.float16
 else:
     torch_dtype = torch.float32
 
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+pipe = DiffusionPipeline.from_pretrained(current_model_repo_id, torch_dtype=torch_dtype)
 pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
+def clean_vram():
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+    if torch.xpu.is_available():
+        torch.xpu.empty_cache()
 
 # @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
     prompt,
+    model_repo,
     negative_prompt,
     seed,
     randomize_seed,
@@ -33,6 +41,14 @@ def infer(
     num_inference_steps,
     progress=gr.Progress(track_tqdm=True),
 ):
+    global current_model_repo_id, pipe
+
+    if model_repo != current_model_repo_id:
+        print(f"The model changed to {model_repo}, reloading pipeline...")
+        del pipe
+        clean_vram()
+        pipe = DiffusionPipeline.from_pretrained(model_repo, torch_dtype=torch_dtype).to(device)
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
@@ -48,6 +64,8 @@
         generator=generator,
     ).images[0]
 
+    clean_vram()
+
     return image, seed
 
 
@@ -68,6 +86,12 @@ with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown(" # Text-to-Image Gradio Template")
 
+        model_repo = gr.Dropdown(
+            label="Model repository path",
+            choices=["stabilityai/sdxl-turbo", "CompVis/stable-diffusion-v1-4"],
+            allow_custom_value=True
+        )
+
         with gr.Row():
             prompt = gr.Text(
                 label="Prompt",
@@ -86,7 +110,7 @@ with gr.Blocks(css=css) as demo:
             label="Negative prompt",
             max_lines=1,
             placeholder="Enter a negative prompt",
-            visible=False,
+            visible=True,
         )
 
         seed = gr.Slider(
@@ -139,6 +163,7 @@ with gr.Blocks(css=css) as demo:
        fn=infer,
        inputs=[
            prompt,
+            model_repo,
            negative_prompt,
            seed,
            randomize_seed,
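A few notes on what the reworked setup does. The device pick now falls through cuda, then xpu, then cpu, and float16 is used on either GPU. One hedge worth knowing: torch.xpu (the Intel GPU backend) only ships with newer PyTorch builds, so a guarded version of the same selection, with a hasattr check that is my addition and not in the commit, could look like this:

import torch

def pick_device() -> str:
    # Prefer NVIDIA, then Intel GPUs, then fall back to CPU,
    # mirroring the chained conditional the commit adds.
    if torch.cuda.is_available():
        return "cuda"
    # torch.xpu is absent from older PyTorch releases; probe defensively.
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    return "cpu"

device = pick_device()
# Half precision roughly halves VRAM use on GPU; CPU inference stays in float32.
torch_dtype = torch.float16 if device in ("cuda", "xpu") else torch.float32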
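One thing to flag in the reload branch: infer() declares global current_model_repo_id but never reassigns it after swapping pipelines, so every call that selects a non-default model reloads it from scratch. A minimal sketch of the same reload-on-change pattern with that one assignment restored; the helper name is hypothetical, and it assumes the module-level names from the diff (pipe, current_model_repo_id, clean_vram, device, torch_dtype):

from diffusers import DiffusionPipeline

def reload_if_needed(model_repo: str) -> None:
    # Swap the global pipeline only when the requested repo changes.
    global current_model_repo_id, pipe
    if model_repo == current_model_repo_id:
        return
    print(f"The model changed to {model_repo}, reloading pipeline...")
    del pipe      # drop the old pipeline's references first...
    clean_vram()  # ...then return the freed blocks to the driver
    pipe = DiffusionPipeline.from_pretrained(
        model_repo, torch_dtype=torch_dtype
    ).to(device)
    current_model_repo_id = model_repo  # the assignment the commit omits

Deleting the old pipe before calling from_pretrained matters: without it, both models would briefly coexist in VRAM, which is exactly what a small GPU cannot afford.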
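On the UI side, allow_custom_value=True makes the dropdown double as a free-text field, so any Hugging Face Hub repo id can be typed in rather than just the two presets, and the selected value reaches infer() as a plain string via the inputs list. A self-contained sketch of that wiring; everything except the dropdown arguments (which mirror the diff) is hypothetical:

import gradio as gr

def fake_infer(prompt: str, model_repo: str) -> str:
    # Stand-in for the real infer(): just echo what would be run.
    return f"would run {model_repo!r} with prompt {prompt!r}"

with gr.Blocks() as demo:
    model_repo = gr.Dropdown(
        label="Model repository path",
        choices=["stabilityai/sdxl-turbo", "CompVis/stable-diffusion-v1-4"],
        allow_custom_value=True,  # free text: any Hub repo id is accepted
    )
    prompt = gr.Text(label="Prompt")
    status = gr.Textbox(label="Status")
    run_button = gr.Button("Run")
    # The dropdown's current value is passed to the handler as a plain str.
    run_button.click(fn=fake_infer, inputs=[prompt, model_repo], outputs=[status])

if __name__ == "__main__":
    demo.launch()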