kepler296e committed
Commit 694f32d · Parent: 33e0372

fix pipeline

Files changed (2):
  1. main.py +4 -6
  2. sd/pipeline.py +3 -3
main.py CHANGED
@@ -19,6 +19,8 @@ pipe = pipe.to(device)
 
 def generate_image(prompt, negative_prompt, seed, guidance_scale, num_inference_steps, model):
 
+    generator = torch.Generator(device=device).manual_seed(seed)
+
     if model == "from-scratch":
         image = pipeline.generate(
             prompt=prompt,
@@ -30,16 +32,12 @@ def generate_image(prompt, negative_prompt, seed, guidance_scale, num_inference_
             width=512,
             height=512,
             generator=generator,
-            models={},
-            seed=seed,
             device=device,
             idle_device="cpu",
+            models=models,
             tokenizer=tokenizer,
         )
-        return image
     else:
-        generator = torch.Generator(device=device).manual_seed(seed)
-
         image = pipe(
             prompt=prompt,
             negative_prompt=negative_prompt,
@@ -50,7 +48,7 @@ def generate_image(prompt, negative_prompt, seed, guidance_scale, num_inference_
             generator=generator,
         ).images[0]
 
-        return image
+    return image
 
 css="""
 #col-container {
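In effect, the commit hoists the seeded torch.Generator out of the else branch so both paths share it, passes the preloaded models dict to the custom pipeline (dropping the seed= keyword, which generate does not accept), and collapses the two per-branch returns into one. A minimal sketch of the resulting function, assuming pipe, pipeline, models, tokenizer, and device are defined earlier in main.py as the context lines suggest; the keyword arguments elided by the hunks are guessed from the generate signature in sd/pipeline.py and marked as assumptions:

import torch

def generate_image(prompt, negative_prompt, seed, guidance_scale, num_inference_steps, model):
    # Seed one generator up front so both branches draw from the same RNG state.
    generator = torch.Generator(device=device).manual_seed(seed)

    if model == "from-scratch":
        # Custom pipeline: now receives the preloaded `models` dict instead of
        # an empty one, and no longer gets the unsupported seed= keyword.
        image = pipeline.generate(
            prompt=prompt,
            uncond_prompt=negative_prompt,          # assumed; elided by the hunk
            cfg_scale=guidance_scale,               # assumed; elided by the hunk
            n_inference_steps=num_inference_steps,  # assumed; elided by the hunk
            width=512,
            height=512,
            generator=generator,
            device=device,
            idle_device="cpu",
            models=models,
            tokenizer=tokenizer,
        )
    else:
        # Diffusers-style pipeline: unchanged apart from reusing the shared generator.
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,            # assumed; elided by the hunk
            num_inference_steps=num_inference_steps,  # assumed; elided by the hunk
            generator=generator,
        ).images[0]

    # Single exit point replaces the two per-branch returns.
    return image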
sd/pipeline.py CHANGED
@@ -7,15 +7,15 @@ def generate(
     prompt,
     uncond_prompt=None,
     input_image=None,
-    strength=0.8,
-    cfg_scale=7.5,
+    strength=0.9,
+    cfg_scale=8.0,
     n_inference_steps=50,
     width=512,
     height=512,
     generator=None,
-    models={},
     device=None,
     idle_device=None,
+    models={},
     tokenizer=None,
 ):
     latents_width = width // 8
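The sd/pipeline.py side raises the default strength from 0.8 to 0.9 and cfg_scale from 7.5 to 8.0, and moves the models parameter next to tokenizer so the signature mirrors the call site in main.py. One caveat the commit leaves in place: models={} is a mutable default argument, evaluated once at definition time, so any mutation inside generate would leak across calls. A conventional hedge, not part of this commit but worth noting, looks like:

def generate(
    prompt,
    uncond_prompt=None,
    input_image=None,
    strength=0.9,
    cfg_scale=8.0,
    n_inference_steps=50,
    width=512,
    height=512,
    generator=None,
    device=None,
    idle_device=None,
    models=None,        # None sentinel instead of a shared mutable {}
    tokenizer=None,
):
    models = models if models is not None else {}
    latents_width = width // 8
    # ... rest of generate unchanged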