Yaron Koresh committed: Update app.py

app.py CHANGED
@@ -22,7 +22,7 @@ formatter = logging.Formatter('\n >>> [%(levelname)s] %(asctime)s %(name)s: %(me
 handler2.setFormatter(formatter)
 root.addHandler(handler2)
 
-def cmd(cmd, assert_success=False, capture_output=False, env=None, dry_run=False):
+def run(cmd, assert_success=False, capture_output=False, env=None, dry_run=False):
     if dry_run:
         print(f"--> {cmd}")
         result = 1
@@ -36,28 +36,34 @@ def cmd(cmd, assert_success=False, capture_output=False, env=None, dry_run=False
 
     return result
 
-
-
+run("apt install python3-mpi4py")
+run("pip install -r req.txt")
 
-
-
-import
-import
-
-
-from
-#from
-
-#from
-#from
-from
-from diffusers.
-from
-from
-from
-
-
-
+def deps():
+    try:
+        import spaces
+        import torch
+        import gradio as gr
+        import numpy as np
+        from lxml.html import fromstring
+        #from transformers import pipeline
+        from torch import multiprocessing as mp, nn
+        #from torch.multiprocessing import Pool
+        #from pathos.multiprocessing import ProcessPool as Pool
+        #from pathos.threading import ThreadPool as Pool
+        #from diffusers.pipelines.flux import FluxPipeline
+        from diffusers.utils import export_to_gif, load_image
+        from diffusers.models.modeling_utils import ModelMixin
+        from huggingface_hub import hf_hub_download
+        from safetensors.torch import load_file, save_file
+        from diffusers import DiffusionPipeline, AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler, DDIMScheduler, StableDiffusionXLPipeline, UNet2DConditionModel, AutoencoderKL, UNet3DConditionModel
+        #import jax
+        #import jax.numpy as jnp
+        from pyina.launchers import TorqueMpiPool as Pool
+    except:
+        pass
+
+deps()
 
 last_motion=None
 dtype = torch.float16
@@ -67,7 +73,7 @@ device = "cuda"
 #ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
 base = "emilianJR/epiCRealism"
 #base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
-
+vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
 #unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
 adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype, device=device)
 
@@ -76,7 +82,7 @@ fps=10
 time=1
 width=384
 height=768
-step
+step=40
 accu=10
 
 css="""
@@ -223,7 +229,7 @@ def infer(pm):
     export_to_gif(out.frames[0],name,fps=fps)
     return name
 
-def run(i,m,p1,p2,*result):
+def main(i,m,p1,p2,*result):
 
     p1_en = translate(p1,"english")
     p2_en = translate(p2,"english")
@@ -235,7 +241,7 @@ def run(i,m,p1,p2,*result):
     with Pool(f'{ ln }:ppn=2', queue='productionQ', timelimit='5:00:00', workdir='.') as pool:
         return pool.map(infer,arr)
 
-pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
+pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
 pipe.scheduler = DDIMScheduler(
     clip_sample=False,
     beta_start=0.00085,
@@ -299,6 +305,6 @@ with gr.Blocks(theme=gr.themes.Soft(),css=css,js=js) as demo:
 
     gr.on(
         triggers=[run_button.click, prompt.submit, prompt2.submit],
-        fn=
+        fn=main,inputs=[img,motion,prompt,prompt2,*result],outputs=result
    )
 demo.queue().launch()
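A few notes on the changes above. The renamed `run` helper is only partially visible in the hunks (the dry-run branch and the final `return result`). As a point of reference, here is a minimal sketch of how such a dry-run-aware shell helper is typically completed, assuming `subprocess` underneath; the non-dry-run branch is an illustration, not the Space's actual code:

import subprocess

def run(cmd, assert_success=False, capture_output=False, env=None, dry_run=False):
    # Dry-run mode: echo the command instead of executing it (as in the diff).
    if dry_run:
        print(f"--> {cmd}")
        result = 1
    else:
        # Hypothetical completion: execute through the shell and collect output.
        completed = subprocess.run(cmd, shell=True, env=env,
                                   capture_output=capture_output, text=True)
        if assert_success:
            assert completed.returncode == 0, f"command failed: {cmd}"
        result = completed.stdout if capture_output else completed.returncode
    return result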
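One thing worth flagging about the new `deps()` wrapper: names imported inside a function are local to that function, yet the module goes on to use `torch`, `gr`, and `Pool` at top level (e.g. `dtype = torch.float16`). A sketch of a variant that makes the lazy imports visible module-wide; `globals().update(locals())` is an assumed mechanism here, not something this commit shows:

def deps():
    # Import lazily, only after the apt/pip installs above have completed.
    try:
        import torch
        import gradio as gr
        from diffusers import AnimateDiffPipeline, MotionAdapter, DDIMScheduler, AutoencoderKL
        # Promote the imported names from function locals to module globals
        # so top-level code like `dtype = torch.float16` can see them.
        globals().update(locals())
    except ImportError:
        pass

deps()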
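The `DDIMScheduler(` call is cut off by the hunk boundary after `beta_start=0.00085,`. For reference, the remaining arguments in a typical AnimateDiff setup look like the following; everything past `beta_start` is the commonly used configuration from the diffusers AnimateDiff examples, not values confirmed by this diff:

from diffusers import DDIMScheduler

scheduler = DDIMScheduler(
    clip_sample=False,
    beta_start=0.00085,
    beta_end=0.012,                # assumed: standard SD 1.5 beta range
    beta_schedule="linear",        # assumed
    timestep_spacing="trailing",   # assumed
    steps_offset=1,                # assumed
)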
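Finally, the rewired `gr.on` call binds the renamed `main` handler to all three triggers at once. A self-contained toy example of the same pattern, with stand-in components and a stand-in handler:

import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result")

    # One handler, several triggers: clicking the button or submitting
    # the textbox both invoke the same function.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=lambda p: p.upper(),
        inputs=[prompt],
        outputs=result,
    )

demo.queue().launch()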