Srimanth Agastyaraju committed on
Commit
c4737c4
1 Parent(s): 2aad33f

Reduce num_steps in inference pipeline to 5

.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -24,7 +24,7 @@ def inference(prompt, model, n_images, seed):
     print(f"Inferencing '{prompt}' for {n_images} images.")
 
     for i in range(n_images):
-        result = pipe(prompt, generator=generators[i], num_inference_steps=25).images[0]
+        result = pipe(prompt, generator=generators[i], num_inference_steps=5).images[0]
         result_images.append(result)
 
     # Start with empty UI elements
@@ -50,9 +50,6 @@ def inference(prompt, model, n_images, seed):
         st.image(result_images[i], caption=f"Image - {i+3}")
 
 
-def main():
-    pass
-
 if __name__ == "__main__":
     # --- START UI ---
     st.title("Finetune LoRA inference")
.ipynb_checkpoints/requirements-checkpoint.txt CHANGED
@@ -1,5 +1,6 @@
 transformers
 diffusers
+accelerate
 huggingface_hub
 streamlit
 torch
app.py CHANGED
@@ -24,7 +24,7 @@ def inference(prompt, model, n_images, seed):
     print(f"Inferencing '{prompt}' for {n_images} images.")
 
     for i in range(n_images):
-        result = pipe(prompt, generator=generators[i], num_inference_steps=25).images[0]
+        result = pipe(prompt, generator=generators[i], num_inference_steps=5).images[0]
         result_images.append(result)
 
     # Start with empty UI elements
@@ -50,9 +50,6 @@ def inference(prompt, model, n_images, seed):
         st.image(result_images[i], caption=f"Image - {i+3}")
 
 
-def main():
-    pass
-
 if __name__ == "__main__":
     # --- START UI ---
     st.title("Finetune LoRA inference")
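
For context, here is a minimal sketch (not part of this commit) of how the pipe and generators objects referenced above are typically built with diffusers; the base model id, LoRA weights path, prompt, and seeding scheme below are assumptions, not code from app.py:

# Minimal sketch, not part of this commit: shows where num_inference_steps=5
# enters the diffusers call. Model id, LoRA path, prompt, and seeds are assumed.
import torch
from diffusers import StableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed base model
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)
pipe.load_lora_weights("path/to/lora-weights")  # assumed location of the finetuned LoRA

seed, n_images = 42, 3
generators = [torch.Generator("cpu").manual_seed(seed + i) for i in range(n_images)]

result_images = []
for i in range(n_images):
    # Same call shape as app.py: 5 denoising steps instead of the previous 25.
    result = pipe(
        "a photo of a golden retriever",  # assumed prompt
        generator=generators[i],
        num_inference_steps=5,
    ).images[0]
    result_images.append(result)

Dropping from 25 to 5 steps keeps the Streamlit demo responsive, but with the default scheduler it usually yields noticeably rougher images; the change is a speed/quality trade-off rather than a quality improvement.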
requirements.txt CHANGED
@@ -1,5 +1,6 @@
 transformers
 diffusers
+accelerate
 huggingface_hub
 streamlit
 torch
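
The accelerate line is the only addition here; diffusers commonly relies on it for faster, low-memory model loading in from_pretrained. A quick sanity check, purely illustrative and not part of the commit, that the updated requirements import cleanly:

# Illustrative check, not part of the commit: verify the listed packages,
# including the newly added accelerate, are importable in the environment.
import importlib

for pkg in ("transformers", "diffusers", "accelerate", "huggingface_hub", "streamlit", "torch"):
    mod = importlib.import_module(pkg)
    print(f"{pkg}: {getattr(mod, '__version__', 'unknown')}")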