genevera committed
Commit 1baaf3c
1 Parent(s): 28d5bd6

dont hardcode the generator device

Files changed (1): app.py +4 -6
app.py CHANGED
@@ -181,8 +181,8 @@ def greet(audio, steps=25, scheduler="ddpm"):
     token_embeds = model.text_encoder.get_input_embeddings().weight.data
 
     token_embeds[model.placeholder_token_id] = audio_token.clone()
-    g_gpu = torch.Generator(device='cuda')
-    g_gpu.manual_seed(23229249375547) # no reason this can't be input by the user!
+    generator = torch.Generator(device=device)
+    generator.manual_seed(23229249375547) # no reason this can't be input by the user!
     pipeline = StableDiffusionPipeline.from_pretrained(
         pretrained_model_name_or_path=model.repo_id,
         tokenizer=model.tokenizer,
@@ -194,10 +194,8 @@ def greet(audio, steps=25, scheduler="ddpm"):
     ).to(device)
     pipeline.enable_xformers_memory_efficient_attention()
 
-    # pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
-    # pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
-    print(f"taking {steps} steps using the {scheduler} scheduler")
-    image = pipeline(prompt, num_inference_steps=steps, guidance_scale=8.5, generator=g_gpu).images[0]
+    # print(f"taking {steps} steps using the {scheduler} scheduler")
+    image = pipeline(prompt, num_inference_steps=steps, guidance_scale=8.5, generator=generator).images[0]
     return image
 
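The point of the change is that the torch.Generator now follows whatever device the pipeline itself runs on, instead of assuming CUDA. Below is a minimal sketch of that pattern, assuming device is resolved once at startup; the model id and prompt are illustrative placeholders, not values taken from app.py.

import torch
from diffusers import StableDiffusionPipeline

# Pick the device once; everything downstream (pipeline and generator) follows it.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Illustrative checkpoint; app.py builds its pipeline from its own repo_id and tokenizer.
pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5"
).to(device)

# The generator lives on the same device as the pipeline, so the code also runs on CPU-only hosts.
generator = torch.Generator(device=device)
generator.manual_seed(23229249375547)  # fixed seed from the diff; could be exposed as user input

image = pipeline(
    "a photo of a cat",  # placeholder prompt
    num_inference_steps=25,
    guidance_scale=8.5,
    generator=generator,
).images[0]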