Update app.py
app.py CHANGED
@@ -1,81 +1,25 @@
 import gradio as gr
-import torch
-import os
-import spaces
+from gradio_client import Client, file
 import uuid

-from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
-from diffusers.utils import export_to_video
-from huggingface_hub import hf_hub_download
-from safetensors.torch import load_file
-from PIL import Image
-
 MORE = """ ## TRY Other Models
 ### JARVIS: Your VOICE Assistant -> https://huggingface.co/spaces/KingNish/JARVIS
 ### Instant Image: 4k images in 5 Second -> https://huggingface.co/spaces/KingNish/Instant-Image
 """

-#
-bases = {
-    "Cartoon": "frankjoshua/toonyou_beta6",
-    "Realistic": "emilianJR/epiCRealism",
-    "3d": "Lykon/DreamShaper",
-    "Anime": "Yntec/mistoonAnime2"
-}
-step_loaded = None
-base_loaded = "Realistic"
-motion_loaded = None
-
-# Ensure model and scheduler are initialized in GPU-enabled function
-if not torch.cuda.is_available():
-    raise NotImplementedError("No GPU detected!")
-
-device = "cuda"
-dtype = torch.float16
-pipe = AnimateDiffPipeline.from_pretrained(bases[base_loaded], torch_dtype=dtype).to(device)
-pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
-
-# Safety checkers
-from transformers import CLIPFeatureExtractor
+# Gradio Client
+client = Client("KingNish/Instant-Video")

-
-
-# Function
-@spaces.GPU(duration=60,queue=False)
+# Function
 def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Progress()):
-
-
-
-
-
-
-
-
-        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
-        step_loaded = step
-
-    if base_loaded != base:
-        pipe.unet.load_state_dict(torch.load(hf_hub_download(bases[base], "unet/diffusion_pytorch_model.bin"), map_location=device), strict=False)
-        base_loaded = base
-
-    if motion_loaded != motion:
-        pipe.unload_lora_weights()
-        if motion != "":
-            pipe.load_lora_weights(motion, adapter_name="motion")
-            pipe.set_adapters(["motion"], [0.7])
-        motion_loaded = motion
-
-    progress((0, step))
-    def progress_callback(i, t, z):
-        progress((i+1, step))
-
-    output = pipe(prompt=prompt, guidance_scale=1.2, num_inference_steps=step, callback=progress_callback, callback_steps=1)
-
-    name = str(uuid.uuid4()).replace("-", "")
-    path = f"/tmp/{name}.mp4"
-    export_to_video(output.frames[0], path, fps=10)
-    return path
-
+    result = client.predict(
+        prompt=prompt,
+        base=base,
+        motion=motion,
+        step=step,
+        api_name="/generate_image_1"
+    )
+    return result

 # Gradio Interface
 with gr.Blocks(css="style.css") as demo:
@@ -83,8 +27,8 @@ with gr.Blocks(css="style.css") as demo:
         "<h1><center>Instant⚡Video</center></h1>" +
         "<p><center><span style='color: red;'>You may change the steps from 4 to 8, if you didn't get satisfied results.</center></p>" +
         "<p><center><strong>First Video Generating takes time then Videos generate faster.</p>" +
-        "<p><center>To get best results Make Sure to Write prompts in style as Given in Examples
-        "<p><a href='https://huggingface.co/spaces/KingNish/Instant-Video/discussions/1'
+        "<p><center>To get best results Make Sure to Write prompts in style as Given in Examples</center></p>" +
+        "<p><a href='https://huggingface.co/spaces/KingNish/Instant-Video/discussions/1'>Must Share your Best Results with Community - Click HERE</a></p>"
     )
     with gr.Group():
         with gr.Row():
@@ -100,7 +44,7 @@ with gr.Blocks(css="style.css") as demo:
                 "3d",
                 "Anime",
             ],
-            value=
+            value="Realistic",
             interactive=True
         )
         select_motion = gr.Dropdown(
@@ -170,4 +114,4 @@ with gr.Blocks(css="style.css") as demo:
        cache_examples=True,
    )

-demo.queue().launch()
+demo.queue().launch()
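The net effect of this commit is that app.py no longer runs the AnimateDiff pipeline locally: the torch/diffusers imports, the bases dict, and the GPU pipeline setup are deleted, and generate_image now just forwards its arguments to the hosted Space through gradio_client. For reference, the same endpoint can be called from any Python environment. This is a minimal sketch, not part of the commit, assuming the KingNish/Instant-Video Space is running and still exposes the /generate_image_1 endpoint shown above; the example prompt is hypothetical, and the exact return value depends on the gradio_client version (typically a local path to the downloaded video file).

# Minimal client-side sketch (assumption: the Space is up and the endpoint name is unchanged)
from gradio_client import Client

client = Client("KingNish/Instant-Video")

# Same parameters as the generate_image() wrapper in the diff above
video_path = client.predict(
    prompt="A cat walking on a beach at sunset",  # hypothetical example prompt
    base="Realistic",                             # one of: Cartoon, Realistic, 3d, Anime
    motion="",                                    # empty string = no motion LoRA
    step=8,                                       # number of inference steps (the UI suggests 4 to 8)
    api_name="/generate_image_1",
)
print(video_path)  # expected: path to the generated .mp4 returned by the Space

Routing generation through the hosted endpoint is what lets the commit drop the torch, diffusers, and spaces (ZeroGPU) dependencies from this copy of the app, which is why the entire pipeline-loading block disappears in the first hunk.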