KingNish committed on
Commit fcbac04 · verified · 1 Parent(s): e69fdda

Update app.py

Files changed (1)
  1. app.py +31 -24
app.py CHANGED
@@ -10,6 +10,10 @@ from huggingface_hub import hf_hub_download
 from safetensors.torch import load_file
 from PIL import Image
 
+MORE = """ ## TRY Other Demos
+### Instant Image: 4k images in 5 Second -> https://huggingface.co/spaces/KingNish/Instant-Image
+"""
+
 # Constants
 bases = {
     "Cartoon": "frankjoshua/toonyou_beta6",
@@ -17,29 +21,41 @@ bases = {
     "3d": "Lykon/DreamShaper",
     "Anime": "Yntec/mistoonAnime2"
 }
+step_loaded = None
+base_loaded = "Realistic"
+motion_loaded = None
 
-# Preload models
+# Ensure model and scheduler are initialized in GPU-enabled function
 if not torch.cuda.is_available():
     raise NotImplementedError("No GPU detected!")
 
 device = "cuda"
 dtype = torch.float16
+pipe = AnimateDiffPipeline.from_pretrained(bases[base_loaded], torch_dtype=dtype).to(device)
+pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
 
-motion_loaded = None
+# Safety checkers
+from transformers import CLIPFeatureExtractor
 
-pipes = {}
-for base_name, base_path in bases.items():
-    pipe = AnimateDiffPipeline.from_pretrained(base_path, torch_dtype=dtype).to(device)
-    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
-    pipes[base_name] = pipe
+feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
 
 # Function
 @spaces.GPU(duration=60,queue=False)
-def generate_image(prompt, base="Realistic", motion="Default", step=8, progress=gr.Progress()):
-    global pipes
+def generate_image(prompt, base="Realistic", motion="", step=8, progress=gr.Progress()):
+    global step_loaded
+    global base_loaded
     global motion_loaded
+    print(prompt, base, step)
+
+    if step_loaded != step:
+        repo = "ByteDance/AnimateDiff-Lightning"
+        ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
+        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
+        step_loaded = step
 
-    pipe = pipes[base]
+    if base_loaded != base:
+        pipe.unet.load_state_dict(torch.load(hf_hub_download(bases[base], "unet/diffusion_pytorch_model.bin"), map_location=device), strict=False)
+        base_loaded = base
 
     if motion_loaded != motion:
         pipe.unload_lora_weights()
@@ -48,16 +64,6 @@ def generate_image(prompt, base="Realistic", motion="Default", step=8, progress=
         pipe.set_adapters(["motion"], [0.7])
         motion_loaded = motion
 
-
-    # Load step model if not already loaded
-    repo = "ByteDance/AnimateDiff-Lightning"
-    ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
-    try:
-        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt, local_files_only=True), device=device), strict=False)
-    except:
-        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
-
-    # Generate image
     output = pipe(prompt=f"{base} image of {prompt}", guidance_scale=1.2, num_inference_steps=step)
 
     name = str(uuid.uuid4()).replace("-", "")
@@ -66,7 +72,6 @@ def generate_image(prompt, base="Realistic", motion="Default", step=8, progress=
     return path
 
 
-
 # Gradio Interface
 with gr.Blocks(css="style.css") as demo:
     gr.HTML(
@@ -90,7 +95,7 @@ with gr.Blocks(css="style.css") as demo:
                 "3d",
                 "Anime",
             ],
-            value="Realistic",
+            value=base_loaded,
             interactive=True
         )
         select_motion = gr.Dropdown(
@@ -157,7 +162,9 @@ with gr.Blocks(css="style.css") as demo:
         fn=generate_image,
         inputs=[prompt],
         outputs=[video],
-        cache_examples="lazy",
+        cache_examples=True,
     )
 
-demo.queue().launch()
+demo.queue().launch()
+
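
In sum, the commit replaces the per-base `pipes` dictionary (four pipelines resident on the GPU from startup) with a single `AnimateDiffPipeline` whose weights are swapped on demand inside `generate_image`; the globals `step_loaded`, `base_loaded` and `motion_loaded` record what is currently in memory so repeated requests with the same settings skip the reload. Below is a minimal sketch of that lazy-swap pattern, assuming the diffusers / huggingface_hub / safetensors APIs the diff already uses. The helper names `ensure_lightning_step` and `ensure_base` are illustrative, the base list is trimmed to the two entries visible in the diff, and the explicit `MotionAdapter()` is an assumption the diff itself does not show.

```python
# Sketch of the lazy-swap pattern introduced by this commit (not the Space's exact code).
import torch
from diffusers import AnimateDiffPipeline, EulerDiscreteScheduler, MotionAdapter
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device, dtype = "cuda", torch.float16
bases = {  # trimmed to the entries visible in the diff
    "Cartoon": "frankjoshua/toonyou_beta6",
    "Anime": "Yntec/mistoonAnime2",
}

# Build one pipeline once. The diff constructs it without an explicit MotionAdapter;
# one is passed here (an assumption) so the sketch loads cleanly from a plain SD 1.5 repo.
adapter = MotionAdapter().to(device, dtype)
pipe = AnimateDiffPipeline.from_pretrained(
    bases["Cartoon"], motion_adapter=adapter, torch_dtype=dtype
).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)

step_loaded = None       # which AnimateDiff-Lightning step checkpoint sits in the UNet
base_loaded = "Cartoon"  # which base model's UNet weights sit in the pipeline

def ensure_lightning_step(step: int) -> None:
    """Reload the distilled Lightning UNet weights only when the step count changes."""
    global step_loaded
    if step_loaded != step:
        ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
        path = hf_hub_download("ByteDance/AnimateDiff-Lightning", ckpt)
        pipe.unet.load_state_dict(load_file(path, device=device), strict=False)
        step_loaded = step

def ensure_base(base: str) -> None:
    """Swap in another base model's UNet weights only when the base changes."""
    global base_loaded
    if base_loaded != base:
        path = hf_hub_download(bases[base], "unet/diffusion_pytorch_model.bin")
        pipe.unet.load_state_dict(torch.load(path, map_location=device), strict=False)
        base_loaded = base
```

The point of the pattern is memory and startup time: nothing beyond the default base is loaded until a request asks for it, and the first request after a change of step, base, or motion pays the download and state-dict load cost, which the `*_loaded` globals then amortize across later requests.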
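
The motion handling sits mostly outside the shown hunks: the diff keeps `pipe.unload_lora_weights()` and `pipe.set_adapters(["motion"], [0.7])`, but the line that actually loads the selected motion LoRA falls between hunks and is not reproduced here. For orientation only, the snippet below continues the sketch above with a hypothetical swap; the `guoyww` repo id is an example and is not taken from this Space.

```python
# Continues the sketch above. Hypothetical motion handling; the actual LoRA load
# line falls between the commit's hunks, and the repo id here is only an example.
motion_loaded = None

def ensure_motion(motion: str) -> None:
    """Swap the motion LoRA only when the selection changes ("" means no LoRA)."""
    global motion_loaded
    if motion_loaded != motion:
        pipe.unload_lora_weights()  # drop whichever motion LoRA was active
        if motion != "":
            pipe.load_lora_weights(
                "guoyww/animatediff-motion-lora-zoom-in",  # assumed example repo id
                adapter_name="motion",
            )
            pipe.set_adapters(["motion"], [0.7])  # 0.7 weighting, as in the diff
        motion_loaded = motion
```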
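
One loose end: the new "# Safety checkers" block loads a `CLIPFeatureExtractor`, but none of the hunks shown here attach it to an actual checker. In diffusers a feature extractor of this kind is normally paired with `StableDiffusionSafetyChecker`; the snippet below is a hypothetical pairing for reference, not something this commit does.

```python
# Hypothetical pairing of the feature extractor with a safety checker;
# this wiring does not appear in the commit's hunks.
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
```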