Update app.py
app.py CHANGED:

@@ -158,7 +158,7 @@ preset_options = [
     {"label": "896x608, 73 frames", "width": 896, "height": 608, "num_frames": 73},
     {"label": "896x544, 81 frames", "width": 896, "height": 544, "num_frames": 81},
     {"label": "832x544, 89 frames", "width": 832, "height": 544, "num_frames": 89},
-    {"label": "768x768, 200 frames", "width": 768, "height": 768, "num_frames":
+    {"label": "768x768, 200 frames", "width": 768, "height": 768, "num_frames": 140},
     {"label": "768x512, 97 frames", "width": 768, "height": 512, "num_frames": 97},
     {"label": "800x480, 105 frames", "width": 800, "height": 480, "num_frames": 105},
     {"label": "736x480, 113 frames", "width": 736, "height": 480, "num_frames": 113},

@@ -250,7 +250,7 @@ def generate_video_from_text(
         "media_items": None,
     }

-    generator = torch.Generator(device="
+    generator = torch.Generator(device="cuda").manual_seed(seed)

     def gradio_progress_callback(self, step, timestep, kwargs):
         progress((step + 1) / num_inference_steps)

@@ -346,7 +346,7 @@ def generate_video_from_image(
         "media_items": media_items,
     }

-    generator = torch.Generator(device="
+    generator = torch.Generator(device="cuda").manual_seed(seed)

     def gradio_progress_callback(self, step, timestep, kwargs):
         progress((step + 1) / num_inference_steps)
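Both generate_video_from_text and generate_video_from_image now build a CUDA generator seeded with the user-supplied seed, so repeated runs with the same inputs draw the same noise. A minimal sketch of that pattern, assuming a CUDA device is available and that the seed comes from the Gradio UI; the surrounding pipeline call is not part of this diff, so the `generator=` keyword shown in the comment is an assumption for illustration only:

    import torch

    # Hypothetical seed value; in app.py the seed is a user-facing input.
    seed = 42

    # Seeded CUDA generator, as added in both hunks above.
    generator = torch.Generator(device="cuda").manual_seed(seed)

    # Noise drawn from the seeded generator is identical across runs,
    # which is what makes the video generation reproducible.
    noise = torch.randn((1, 4, 64, 64), generator=generator, device="cuda")

    # Diffusers-style pipelines typically accept the generator directly, e.g.:
    # video = pipeline(prompt, generator=generator, num_inference_steps=num_inference_steps)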