mrcuddle committed on
Commit
1f6711e
·
verified ·
1 Parent(s): 086180d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -3
app.py CHANGED
@@ -3,9 +3,7 @@ import torch
3
  from diffusers import I2VGenXLPipeline
4
  from diffusers.utils import export_to_gif, load_image
5
  import tempfile
6
- import spaces
7
 
8
- @spaces.GPU
9
  def initialize_pipeline():
10
  # Initialize the pipeline without CUDA support
11
  pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
@@ -71,4 +69,21 @@ with gr.Blocks() as demo:
71
  with gr.Row():
72
  with gr.Column():
73
  image_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
74
- image_input = gr.Image(type="filepath"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  from diffusers import I2VGenXLPipeline
4
  from diffusers.utils import export_to_gif, load_image
5
  import tempfile
 
6
 
 
7
  def initialize_pipeline():
8
  # Initialize the pipeline without CUDA support
9
  pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
 
69
  with gr.Row():
70
  with gr.Column():
71
  image_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
72
+ image_input = gr.Image(type="filepath", label="Input Image")
73
+ image_negative_prompt = gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt")
74
+ image_num_inference_steps = gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps")
75
+ image_guidance_scale = gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale")
76
+ image_seed = gr.Number(label="Seed", value=8888)
77
+ image_generate_button = gr.Button("Generate GIF")
78
+
79
+ with gr.Column():
80
+ image_output_video = gr.Video(label="Generated GIF")
81
+
82
+ image_generate_button.click(
83
+ fn=generate_gif,
84
+ inputs=[image_prompt, image_input, image_negative_prompt, image_num_inference_steps, image_guidance_scale, image_seed],
85
+ outputs=image_output_video
86
+ )
87
+
88
+ # Launch the interface
89
+ demo.launch()