daquanzhou commited on
Commit
4d432fd
β€’
1 Parent(s): 71b4178

add motion lora and examples

Browse files
app.py CHANGED
@@ -154,11 +154,16 @@ class MagicMeController:
154
  self.id_embed_dir = "models/embeddings"
155
  self.save_dir = "output"
156
  self.base_model_dir = "models/checkpoints"
 
157
  self.selected_base_model = "realisticVision_v51.safetensors"
 
 
 
158
  self.id_embed_list = []
159
  self.woman_id_embed_list = ["beyonce", "hermione", "lifeifei", "lisa", "mona", "monroe", "taylor", "scarlett"]
160
  self.refresh_id_embed_list()
161
  self.refresh_base_model_list()
 
162
 
163
  with torch.inference_mode():
164
  vaeloader = VAELoader()
@@ -201,7 +206,7 @@ class MagicMeController:
201
 
202
  ade_animatediffloraloader = NODE_CLASS_MAPPINGS["ADE_AnimateDiffLoRALoader"]()
203
  self.ade_animatediffloraloader_196 = ade_animatediffloraloader.load_motion_lora(
204
- lora_name="v2_lora_ZoomIn.ckpt", strength=0.6
205
  )
206
 
207
  impactint = NODE_CLASS_MAPPINGS["ImpactInt"]()
@@ -235,9 +240,23 @@ class MagicMeController:
235
  id_embed_list = glob(os.path.join(self.id_embed_dir, "*.pt"))
236
  self.id_embed_list = [Path(p).stem for p in id_embed_list]
237
 
 
 
 
 
 
 
238
  def refresh_base_model_list(self):
239
  base_model_list = glob(os.path.join(self.base_model_dir, "*.safetensors"))
240
  self.base_model_list = [os.path.basename(p)for p in base_model_list]
 
 
 
 
 
 
 
 
241
 
242
 
243
  def update_base_model(self, base_model_dropdown):
@@ -248,10 +267,18 @@ class MagicMeController:
248
  )
249
  return gr.Dropdown.update()
250
 
251
-
 
 
 
 
 
 
252
 
253
- def run_t2v_face_tiled(self, base_model_dropdown, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
 
254
  if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
 
255
 
256
  category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
257
  prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
@@ -477,14 +504,17 @@ class MagicMeController:
477
  "n_prompt": negative_prompt_text_box,
478
  "id_embed_dropdown": id_embed_dropdown,
479
  "gaussian_slider": gaussian_slider,
480
- "seed_text_box": seed_text_box
 
 
481
  }
482
  return gr.Video.update(value=orig_video_path), gr.Video.update(value=face_detailer_video_path),gr.Video.update(value=sr_video_path), gr.Json.update(value=json_config)
483
 
484
 
485
 
486
- def run_t2v_face(self, base_model_dropdown, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
487
  if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
 
488
 
489
  category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
490
  prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
@@ -657,15 +687,18 @@ class MagicMeController:
657
  "n_prompt": negative_prompt_text_box,
658
  "id_embed_dropdown": id_embed_dropdown,
659
  "gaussian_slider": gaussian_slider,
660
- "seed_text_box": seed_text_box
 
 
661
  }
662
  return gr.Video.update(value=orig_video_path), gr.Video.update(value=face_detailer_video_path), gr.Json.update(value=json_config)
663
 
664
 
665
 
666
 
667
- def run_t2v(self, base_model_dropdown, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
668
  if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
 
669
 
670
  category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
671
  prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
@@ -781,12 +814,15 @@ class MagicMeController:
781
  orig_video_path = sorted(glob(os.path.join(self.save_dir, 'orig*.mp4')))[-1]
782
 
783
  json_config = {
 
 
784
  "prompt": prompt,
785
  "n_prompt": negative_prompt_text_box,
786
  "id_embed_dropdown": id_embed_dropdown,
787
  "gaussian_slider": gaussian_slider,
788
- "seed_text_box": seed_text_box
789
  }
 
790
  return gr.Video.update(value=orig_video_path), gr.Json.update(value=json_config)
791
 
792
 
@@ -807,46 +843,26 @@ css = """
807
 
808
 
809
  examples = [
810
- # 1-ToonYou
811
  [
812
- # "toonyou_beta3.safetensors",
813
- # "mm_sd_v14.ckpt",
814
- "masterpiece, best quality, 1girl, solo, cherry blossoms, hanami, pink flower, white flower, spring season, wisteria, petals, flower, plum blossoms, outdoors, falling petals, white hair, black eyes",
815
- "worst quality, low quality, nsfw, logo",
816
- # 512, 512, "13204175718326964000"
 
 
817
  ],
818
- # 2-Lyriel
819
  [
820
- # "lyriel_v16.safetensors",
821
- # "mm_sd_v15.ckpt",
822
- "A forbidden castle high up in the mountains, pixel art, intricate details2, hdr, intricate details, hyperdetailed5, natural skin texture, hyperrealism, soft light, sharp, game art, key visual, surreal",
823
- "3d, cartoon, anime, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, bad anatomy, girl, loli, young, large breasts, red eyes, muscular",
824
- # 512, 512, "6681501646976930000"
 
 
825
  ],
826
- # 3-RCNZ
827
- [
828
- # "rcnzCartoon3d_v10.safetensors",
829
- # "mm_sd_v14.ckpt",
830
- "Jane Eyre with headphones, natural skin texture,4mm,k textures, soft cinematic light, adobe lightroom, photolab, hdr, intricate, elegant, highly detailed, sharp focus, cinematic look, soothing tones, insane details, intricate details, hyperdetailed, low contrast, soft cinematic light, dim colors, exposure blend, hdr, faded",
831
- "deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
832
- # 512, 512, "2416282124261060"
833
- ],
834
- # 4-MajicMix
835
- [
836
- # "majicmixRealistic_v5Preview.safetensors",
837
- # "mm_sd_v14.ckpt",
838
- "1girl, offshoulder, light smile, shiny skin best quality, masterpiece, photorealistic",
839
- "bad hand, worst quality, low quality, normal quality, lowres, bad anatomy, bad hands, watermark, moles",
840
- # 512, 512, "7132772652786303"
841
- ],
842
- # 5-RealisticVision
843
- [
844
- # "realisticVisionV20_v20.safetensors",
845
- # "mm_sd_v15.ckpt",
846
- "photo of coastline, rocks, storm weather, wind, waves, lightning, 8k uhd, dslr, soft lighting, high quality, film grain, Fujifilm XT3",
847
- "blur, haze, deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers, deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation",
848
- # 512, 512, "1490157606650685400"
849
- ]
850
  ]
851
 
852
 
@@ -863,9 +879,9 @@ def ui():
863
  gr.Markdown(
864
  """
865
  ### Quick Start
866
- 1. Select desired `ID embedding`.
867
  2. Provide `Prompt` and `Negative Prompt`. Please use proper pronoun for the character's gender.
868
- 3. Click on one of three `Go buttons. The fewer the running modules, the less time you need to wait. Enjoy!
869
  """
870
  )
871
  with gr.Row():
@@ -875,32 +891,32 @@ def ui():
875
  prompt_textbox = gr.Textbox( label="Prompt", info="a photo of <V*> man/woman ", lines=3, value="in superman costume in the outer space, stars in the background" )
876
  negative_prompt_textbox = gr.Textbox( label="Negative Prompt", lines=3, value="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, UnrealisticDream")
877
  with gr.Row():
878
- seed_textbox = gr.Textbox( label="Seed", value=random.randint(1, 2 ** 32))
879
  seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
880
  seed_button.click(fn=lambda: gr.Textbox.update(value=random.randint(1, 1e16)), inputs=[], outputs=[seed_textbox])
881
 
882
 
883
  with gr.Column():
884
  with gr.Accordion("Advance", open=False):
885
- with gr.Row():
886
- base_model_dropdown = gr.Dropdown( label="Base DreamBooth Model", choices=c.base_model_list, value=c.selected_base_model, interactive=True )
887
- base_model_dropdown.change(fn=c.update_base_model, inputs=[base_model_dropdown], outputs=[base_model_dropdown])
 
888
 
889
- with gr.Row():
890
- gaussian_slider = gr.Slider( label="3D Gaussian Noise Covariance", value=0.2, minimum=0, maximum=1, step=0.05 )
891
  json_config = gr.Json(label="Output Config", value=None )
892
 
893
  with gr.Row():
894
  generate_button_t2v = gr.Button( value="Go (T2V VCD)", variant='primary' )
895
- generate_button_face = gr.Button( value="Go (T2V + Face VCD)", variant='primary' )
896
- generate_button_tiled = gr.Button( value="Go (T2V + Face + Tiled VCD)", variant='primary' )
897
 
898
  with gr.Row():
899
  orig_video = gr.Video( label="Video after T2I VCD", interactive=False )
900
  face_detailer_video = gr.Video( label="Video after Face VCD", interactive=False )
901
  sr_video = gr.Video( label="Video after Tiled VCD", interactive=False )
902
 
903
- inputs = [base_model_dropdown, prompt_textbox, negative_prompt_textbox, id_embed_dropdown, gaussian_slider, seed_textbox]
904
  outputs_t2v = [orig_video, json_config]
905
  outputs_t2v_face = [orig_video, face_detailer_video, json_config]
906
  outputs_t2v_face_tiled = [orig_video, face_detailer_video, sr_video, json_config]
@@ -909,7 +925,7 @@ def ui():
909
  generate_button_face.click( fn=c.run_t2v_face, inputs=inputs, outputs=outputs_t2v_face )
910
  generate_button_tiled.click( fn=c.run_t2v_face_tiled, inputs=inputs, outputs=outputs_t2v_face_tiled )
911
 
912
- # gr.Examples( fn=c.run_once, examples=examples, inputs=inputs, outputs=outputs, cache_examples=True )
913
 
914
  return demo
915
 
 
154
  self.id_embed_dir = "models/embeddings"
155
  self.save_dir = "output"
156
  self.base_model_dir = "models/checkpoints"
157
+ self.base_model_list = []
158
  self.selected_base_model = "realisticVision_v51.safetensors"
159
+ self.motion_lora_dir = "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora"
160
+ self.motion_lora_list = []
161
+ self.selected_motion_lora = "v2_lora_ZoomIn.ckpt"
162
  self.id_embed_list = []
163
  self.woman_id_embed_list = ["beyonce", "hermione", "lifeifei", "lisa", "mona", "monroe", "taylor", "scarlett"]
164
  self.refresh_id_embed_list()
165
  self.refresh_base_model_list()
166
+ self.refresh_motion_lora_list()
167
 
168
  with torch.inference_mode():
169
  vaeloader = VAELoader()
 
206
 
207
  ade_animatediffloraloader = NODE_CLASS_MAPPINGS["ADE_AnimateDiffLoRALoader"]()
208
  self.ade_animatediffloraloader_196 = ade_animatediffloraloader.load_motion_lora(
209
+ lora_name=self.selected_motion_lora, strength=0.6
210
  )
211
 
212
  impactint = NODE_CLASS_MAPPINGS["ImpactInt"]()
 
240
  id_embed_list = glob(os.path.join(self.id_embed_dir, "*.pt"))
241
  self.id_embed_list = [Path(p).stem for p in id_embed_list]
242
 
243
+
244
def refresh_motion_lora_list(self):
    """Rescan the motion-LoRA directory and cache the available .ckpt filenames.

    Populates ``self.motion_lora_list`` with the basenames of every ``*.ckpt``
    file found under ``self.motion_lora_dir`` (the AnimateDiff-Evolved
    ``motion_lora`` folder). Called from ``__init__`` and mirrors
    ``refresh_base_model_list``.
    """
    # Bug fix: the original globbed os.path.join(self.motion_lora_list, ...),
    # i.e. the (empty) *list* attribute instead of the directory path, which
    # raises TypeError on startup. Glob the directory instead.
    ckpt_paths = glob(os.path.join(self.motion_lora_dir, "*.ckpt"))
    self.motion_lora_list = [os.path.basename(p) for p in ckpt_paths]
247
+
248
+
249
  def refresh_base_model_list(self):
250
  base_model_list = glob(os.path.join(self.base_model_dir, "*.safetensors"))
251
  self.base_model_list = [os.path.basename(p)for p in base_model_list]
252
+
253
+ # NOTE(review): apparent copy-paste defect — this def is named update_motion_lora
+ # but its parameter and body duplicate update_base_model (it sets
+ # selected_base_model and reloads a checkpoint, never touching any motion LoRA).
+ # A later, correct update_motion_lora definition shadows this one in the class,
+ # so this block is dead code; it should be removed — confirm before deleting.
+ def update_motion_lora(self, base_model_dropdown):
254
+ self.selected_base_model = base_model_dropdown
255
+ checkpointloadersimple = CheckpointLoaderSimple()
256
+ self.checkpointloadersimple_32 = checkpointloadersimple.load_checkpoint(
257
+ ckpt_name=self.selected_base_model
258
+ )
259
+ return gr.Dropdown.update()
260
 
261
 
262
  def update_base_model(self, base_model_dropdown):
 
267
  )
268
  return gr.Dropdown.update()
269
 
270
+ def update_motion_lora(self, motion_lora_dropdown):
271
+ self.selected_motion_lora = motion_lora_dropdown
272
+ ade_animatediffloraloader = NODE_CLASS_MAPPINGS["ADE_AnimateDiffLoRALoader"]()
273
+ self.ade_animatediffloraloader_196 = ade_animatediffloraloader.load_motion_lora(
274
+ lora_name=self.selected_motion_lora, strength=0.6
275
+ )
276
+ return gr.Dropdown.update()
277
 
278
+
279
+ def run_t2v_face_tiled(self, base_model_dropdown, motion_lora_dropdown, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
280
  if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
281
+ if self.selected_motion_lora != motion_lora_dropdown: self.update_motion_lora(motion_lora_dropdown)
282
 
283
  category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
284
  prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
 
504
  "n_prompt": negative_prompt_text_box,
505
  "id_embed_dropdown": id_embed_dropdown,
506
  "gaussian_slider": gaussian_slider,
507
+ "seed_text_box": seed_text_box,
508
+ "motion_lora_dropdown": motion_lora_dropdown,
509
+ "base_model_dropdown": base_model_dropdown
510
  }
511
  return gr.Video.update(value=orig_video_path), gr.Video.update(value=face_detailer_video_path),gr.Video.update(value=sr_video_path), gr.Json.update(value=json_config)
512
 
513
 
514
 
515
+ def run_t2v_face(self, base_model_dropdown, motion_lora_dropdown, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
516
  if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
517
+ if self.selected_motion_lora != motion_lora_dropdown: self.update_motion_lora(motion_lora_dropdown)
518
 
519
  category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
520
  prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
 
687
  "n_prompt": negative_prompt_text_box,
688
  "id_embed_dropdown": id_embed_dropdown,
689
  "gaussian_slider": gaussian_slider,
690
+ "seed_text_box": seed_text_box,
691
+ "motion_lora_dropdown": motion_lora_dropdown,
692
+ "base_model_dropdown": base_model_dropdown
693
  }
694
  return gr.Video.update(value=orig_video_path), gr.Video.update(value=face_detailer_video_path), gr.Json.update(value=json_config)
695
 
696
 
697
 
698
 
699
+ def run_t2v(self, base_model_dropdown, motion_lora_dropdown, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
700
  if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
701
+ if self.selected_motion_lora != motion_lora_dropdown: self.update_motion_lora(motion_lora_dropdown)
702
 
703
  category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
704
  prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
 
814
  orig_video_path = sorted(glob(os.path.join(self.save_dir, 'orig*.mp4')))[-1]
815
 
816
  json_config = {
817
+ "base_model_dropdown": base_model_dropdown,
818
+ "motion_lora_dropdown": motion_lora_dropdown,
819
  "prompt": prompt,
820
  "n_prompt": negative_prompt_text_box,
821
  "id_embed_dropdown": id_embed_dropdown,
822
  "gaussian_slider": gaussian_slider,
823
+ "seed_text_box": seed_text_box,
824
  }
825
+
826
  return gr.Video.update(value=orig_video_path), gr.Json.update(value=json_config)
827
 
828
 
 
843
 
844
 
845
  examples = [
846
+ # 1-Realistic Vision
847
  [
848
+ "realisticVision_v51.safetensors",
849
+ "v2_lora_ZoomIn.ckpt",
850
+ "a photo of embedding:altman man in superman costume in the outer space, stars in the background",
851
+ "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, UnrealisticDream",
852
+ "altman",
853
+ 0.2,
854
+ 3323153235
855
  ],
856
+ # 2-RCNZ
857
  [
858
+ "rcnzCartoon3d_v10.safetensors",
859
+ "v2_lora_ZoomIn.ckpt",
860
+ "a photo of embedding:altman man in superman costume in the outer space, stars in the background",
861
+ "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, UnrealisticDream",
862
+ "altman",
863
+ 0.2,
864
+ 4164379572666061
865
  ],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
866
  ]
867
 
868
 
 
879
  gr.Markdown(
880
  """
881
  ### Quick Start
882
+ 1. Select desired `ID embedding`. There are more advanced settings in the drop-down menu `Advanced`.
883
  2. Provide `Prompt` and `Negative Prompt`. Please use proper pronoun for the character's gender.
884
+ 3. Click on one of three `Go` buttons. The fewer the running modules, the less time you need to wait. Enjoy!
885
  """
886
  )
887
  with gr.Row():
 
891
  prompt_textbox = gr.Textbox( label="Prompt", info="a photo of <V*> man/woman ", lines=3, value="in superman costume in the outer space, stars in the background" )
892
  negative_prompt_textbox = gr.Textbox( label="Negative Prompt", lines=3, value="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, UnrealisticDream")
893
  with gr.Row():
894
+ seed_textbox = gr.Textbox( label="Seed (change to get various videos)", value=random.randint(1, 2 ** 32))
895
  seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
896
  seed_button.click(fn=lambda: gr.Textbox.update(value=random.randint(1, 1e16)), inputs=[], outputs=[seed_textbox])
897
 
898
 
899
  with gr.Column():
900
  with gr.Accordion("Advance", open=False):
901
+ base_model_dropdown = gr.Dropdown( label="Base DreamBooth Model", choices=c.base_model_list, value=c.selected_base_model, interactive=True)
902
+ base_model_dropdown.change(fn=c.update_base_model, inputs=[base_model_dropdown], outputs=[base_model_dropdown])
903
+ motion_lora_dropdown = gr.Dropdown( label="Motion LoRA Model", choices=c.motion_lora_list, value=c.selected_motion_lora, interactive=True)
904
+ motion_lora_dropdown.change(fn=c.update_motion_lora, inputs=[motion_lora_dropdown], outputs=[motion_lora_dropdown])
905
 
906
+ gaussian_slider = gr.Slider( label="3D Gaussian Noise Covariance", value=0.2, minimum=0, maximum=1, step=0.05 )
 
907
  json_config = gr.Json(label="Output Config", value=None )
908
 
909
  with gr.Row():
910
  generate_button_t2v = gr.Button( value="Go (T2V VCD)", variant='primary' )
911
+ generate_button_face = gr.Button( value="Go (T2V + Face VCD, 2X slower)", variant='primary' )
912
+ generate_button_tiled = gr.Button( value="Go (T2V + Face + Tiled VCD, 8X slower)", variant='primary' )
913
 
914
  with gr.Row():
915
  orig_video = gr.Video( label="Video after T2I VCD", interactive=False )
916
  face_detailer_video = gr.Video( label="Video after Face VCD", interactive=False )
917
  sr_video = gr.Video( label="Video after Tiled VCD", interactive=False )
918
 
919
+ inputs = [base_model_dropdown, motion_lora_dropdown, prompt_textbox, negative_prompt_textbox, id_embed_dropdown, gaussian_slider, seed_textbox]
920
  outputs_t2v = [orig_video, json_config]
921
  outputs_t2v_face = [orig_video, face_detailer_video, json_config]
922
  outputs_t2v_face_tiled = [orig_video, face_detailer_video, sr_video, json_config]
 
925
  generate_button_face.click( fn=c.run_t2v_face, inputs=inputs, outputs=outputs_t2v_face )
926
  generate_button_tiled.click( fn=c.run_t2v_face_tiled, inputs=inputs, outputs=outputs_t2v_face_tiled )
927
 
928
+ gr.Examples( fn=c.run_t2v_face_tiled, examples=examples, inputs=inputs, outputs=outputs_t2v_face_tiled, cache_examples=True )
929
 
930
  return demo
931
 
models/checkpoints/toonyou_beta3.safetensors β†’ custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora/v2_lora_PanLeft.ckpt RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:52768d2bc4e73af920df3466d0806d9a40727dab0869fc7aa2f3bc82214bb071
3
- size 2132626252
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed79025f8bea018c8925f43b6304a27e462335b6ec5e6f8a222c2726153844b3
3
+ size 77474499
custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora/v2_lora_PanRight.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4eb9154623c628c76dbd83109f125617c985490fec36ddca5464eb61ac7f6d5
3
+ size 77474499
custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora/v2_lora_RollingAnticlockwise.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ae6cbc81044895243bba9a64df9666db763a52acfd8e496c490af84e812748a
3
+ size 77474499
custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora/v2_lora_RollingClockwise.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:361b1af8500d7fd09c2f884fac5dc0397a4323bae8fab5233443de5383d13630
3
+ size 77474499
custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora/v2_lora_TiltDown.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09e4d5448aba4ea51b3bcd4b5d2b058ed4b47bb72d94d8c05a3ccce3368db6d9
3
+ size 77474499
custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora/v2_lora_TiltUp.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0ee2f181fc69d7fe26e013ad5cfea11f25cb9f5e8fded3c9942b61803cd6c3d
3
+ size 77474499
custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora/v2_lora_ZoomOut.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4172fb2d36410ef638ae0e29d604b66c11ee44b94db9c7cc5ee34d7f865c55d9
3
+ size 77474499