daquanzhou committed • Commit 71b4178 • Parent(s): 507f91c

add more base models
app.py CHANGED
@@ -153,9 +153,12 @@ class MagicMeController:
     def __init__(self):
         self.id_embed_dir = "models/embeddings"
         self.save_dir = "output"
+        self.base_model_dir = "models/checkpoints"
+        self.selected_base_model = "realisticVision_v51.safetensors"
         self.id_embed_list = []
         self.woman_id_embed_list = ["beyonce", "hermione", "lifeifei", "lisa", "mona", "monroe", "taylor", "scarlett"]
-        self.refresh_id_embed_list()
+        self.refresh_id_embed_list()
+        self.refresh_base_model_list()
 
         with torch.inference_mode():
             vaeloader = VAELoader()
@@ -165,7 +168,7 @@ class MagicMeController:
 
             checkpointloadersimple = CheckpointLoaderSimple()
             self.checkpointloadersimple_32 = checkpointloadersimple.load_checkpoint(
-                ckpt_name="realisticVision_v51.safetensors"
+                ckpt_name=self.selected_base_model
             )
 
 
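Together with the __init__ hunk above, this replaces a hard-coded checkpoint name with a selectable one: the constructor records a checkpoints directory and a default model, and the loader now reads self.selected_base_model. A minimal, ComfyUI-free sketch of that pattern (the function names and the fallback behavior here are illustrative, not the app's API):

import os
from glob import glob

BASE_MODEL_DIR = "models/checkpoints"
DEFAULT_BASE_MODEL = "realisticVision_v51.safetensors"

def list_base_models(base_model_dir=BASE_MODEL_DIR):
    # File names only (not full paths): that is what both a dropdown and
    # the loader's ckpt_name argument expect.
    return [os.path.basename(p) for p in glob(os.path.join(base_model_dir, "*.safetensors"))]

def pick_base_model(requested, available):
    # Fall back to the default when the requested checkpoint is not on disk.
    return requested if requested in available else DEFAULT_BASE_MODEL

if __name__ == "__main__":
    models = list_base_models()
    print("available:", models)
    print("selected:", pick_base_model("toonyou_beta3.safetensors", models))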
@@ -228,13 +231,28 @@ class MagicMeController:
 
 
 
-    def refresh_id_embed_list(self):
+    def refresh_id_embed_list(self):
         id_embed_list = glob(os.path.join(self.id_embed_dir, "*.pt"))
         self.id_embed_list = [Path(p).stem for p in id_embed_list]
 
+    def refresh_base_model_list(self):
+        base_model_list = glob(os.path.join(self.base_model_dir, "*.safetensors"))
+        self.base_model_list = [os.path.basename(p) for p in base_model_list]
 
 
-    def run_t2v_face_tiled(self, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
+    def update_base_model(self, base_model_dropdown):
+        self.selected_base_model = base_model_dropdown
+        checkpointloadersimple = CheckpointLoaderSimple()
+        self.checkpointloadersimple_32 = checkpointloadersimple.load_checkpoint(
+            ckpt_name=self.selected_base_model
+        )
+        return gr.Dropdown.update()
+
+
+
+    def run_t2v_face_tiled(self, base_model_dropdown, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
+        if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
+
         category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
         prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
         print("prompt:", prompt)
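The new update_base_model swaps checkpoints on demand, and the guard at the top of run_t2v_face_tiled (repeated in run_t2v_face and run_t2v below) reloads only when the dropdown disagrees with the cached selection, so back-to-back runs with the same model skip the expensive load. Returning gr.Dropdown.update() with no arguments is the Gradio 3.x idiom for a no-op component update, needed because the dropdown's change event lists itself as an output. A stubbed sketch of that control flow (FakeLoader is a stand-in for ComfyUI's CheckpointLoaderSimple, not a real class):

class FakeLoader:
    # Pretend checkpoint loading is expensive so reloads are visible.
    def load_checkpoint(self, ckpt_name):
        print(f"(re)loading {ckpt_name}")
        return f"weights<{ckpt_name}>"

class Controller:
    def __init__(self):
        self.selected_base_model = "realisticVision_v51.safetensors"
        self.checkpoint = FakeLoader().load_checkpoint(self.selected_base_model)

    def update_base_model(self, base_model_dropdown):
        self.selected_base_model = base_model_dropdown
        self.checkpoint = FakeLoader().load_checkpoint(base_model_dropdown)

    def run(self, base_model_dropdown):
        # The same guard as in the run_* methods above.
        if self.selected_base_model != base_model_dropdown:
            self.update_base_model(base_model_dropdown)
        return f"run with {self.checkpoint}"

c = Controller()
c.run("realisticVision_v51.safetensors")  # same model: no reload printed
c.run("toonyou_beta3.safetensors")        # new model: triggers a reload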
@@ -465,7 +483,9 @@ class MagicMeController:
 
 
 
-    def run_t2v_face(self, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
+    def run_t2v_face(self, base_model_dropdown, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
+        if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
+
         category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
         prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
         print("prompt:", prompt)
@@ -644,7 +664,9 @@ class MagicMeController:
 
 
 
-    def run_t2v(self, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
+    def run_t2v(self, base_model_dropdown, prompt_text_box, negative_prompt_text_box, id_embed_dropdown, gaussian_slider, seed_text_box):
+        if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown)
+
         category = "woman" if id_embed_dropdown in self.woman_id_embed_list else "man"
         prompt = f"a photo of embedding:{id_embed_dropdown} {category} " + prompt_text_box
         print("prompt:", prompt)
@@ -852,19 +874,22 @@ def ui():
 
         prompt_textbox = gr.Textbox( label="Prompt", info="a photo of <V*> man/woman ", lines=3, value="in superman costume in the outer space, stars in the background" )
         negative_prompt_textbox = gr.Textbox( label="Negative Prompt", lines=3, value="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime), text, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, UnrealisticDream")
+        with gr.Row():
+            seed_textbox = gr.Textbox( label="Seed", value=random.randint(1, 2 ** 32))
+            seed_button = gr.Button(value="\U0001F3B2", elem_classes="toolbutton")
+            seed_button.click(fn=lambda: gr.Textbox.update(value=random.randint(1, 10 ** 16)), inputs=[], outputs=[seed_textbox])
 
 
-        …
-        …
         with gr.Column():
             with gr.Accordion("Advance", open=False):
                 with gr.Row():
-                    …
+                    base_model_dropdown = gr.Dropdown( label="Base DreamBooth Model", choices=c.base_model_list, value=c.selected_base_model, interactive=True )
+                    base_model_dropdown.change(fn=c.update_base_model, inputs=[base_model_dropdown], outputs=[base_model_dropdown])
+
                 with gr.Row():
-                    …
-                    …
-                    …
-                    json_config = gr.Json(label="Config", value=None )
+                    gaussian_slider = gr.Slider( label="3D Gaussian Noise Covariance", value=0.2, minimum=0, maximum=1, step=0.05 )
+                    json_config = gr.Json(label="Output Config", value=None )
+
         with gr.Row():
             generate_button_t2v = gr.Button( value="Go (T2V VCD)", variant='primary' )
             generate_button_face = gr.Button( value="Go (T2V + Face VCD)", variant='primary' )
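A runnable reduction of the seed row added above, assuming Gradio 3.x (the gr.Textbox.update idiom was removed in Gradio 4). Note that random.randint needs integer bounds, which is why 10 ** 16 is used rather than a float literal such as 1e16 (float arguments are rejected from Python 3.12):

import random
import gradio as gr  # assumes Gradio 3.x

with gr.Blocks() as demo:
    with gr.Row():
        seed_textbox = gr.Textbox(label="Seed", value=str(random.randint(1, 2 ** 32)))
        seed_button = gr.Button(value="\U0001F3B2")  # the dice reroll button
    # The callback returns an update that replaces only the textbox value.
    seed_button.click(fn=lambda: gr.Textbox.update(value=str(random.randint(1, 10 ** 16))),
                      inputs=[], outputs=[seed_textbox])

if __name__ == "__main__":
    demo.launch()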
@@ -875,7 +900,7 @@ def ui():
         face_detailer_video = gr.Video( label="Video after Face VCD", interactive=False )
         sr_video = gr.Video( label="Video after Tiled VCD", interactive=False )
 
-    inputs = [prompt_textbox, negative_prompt_textbox, id_embed_dropdown, gaussian_slider, seed_textbox]
+    inputs = [base_model_dropdown, prompt_textbox, negative_prompt_textbox, id_embed_dropdown, gaussian_slider, seed_textbox]
     outputs_t2v = [orig_video, json_config]
     outputs_t2v_face = [orig_video, face_detailer_video, json_config]
     outputs_t2v_face_tiled = [orig_video, face_detailer_video, sr_video, json_config]
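Prepending base_model_dropdown to the shared inputs list is what routes the current dropdown value into every run_* call; the button wiring itself is outside this diff, but presumably follows the usual Gradio pattern of passing one inputs list to each button's click. A self-contained sketch with a dummy handler standing in for the controller methods:

import gradio as gr  # Gradio 3.x, as above

def run_stub(base_model, prompt):
    # Stand-in for c.run_t2v and friends: echo what the handler receives.
    return f"[{base_model}] {prompt}"

with gr.Blocks() as demo:
    base_model_dropdown = gr.Dropdown(label="Base DreamBooth Model",
                                      choices=["realisticVision_v51.safetensors",
                                               "toonyou_beta3.safetensors"],
                                      value="realisticVision_v51.safetensors")
    prompt_textbox = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result", interactive=False)
    inputs = [base_model_dropdown, prompt_textbox]  # one list, reused per button
    gr.Button(value="Go (T2V VCD)", variant="primary").click(
        fn=run_stub, inputs=inputs, outputs=[result])

if __name__ == "__main__":
    demo.launch()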
models/checkpoints/rcnzCartoon3d_v10.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6b4c0392d7486bfa4fd1a31c7b7d2679f743f8ea8d9f219c82b5c33db31ddb9
+size 2132625644

models/checkpoints/toonyou_beta3.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52768d2bc4e73af920df3466d0806d9a40727dab0869fc7aa2f3bc82214bb071
+size 2132626252
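The two checkpoint files backing the new dropdown entries are stored as Git LFS pointers: the repository keeps only the three-line stub shown above, while the roughly 2.1 GB .safetensors payloads live in LFS storage (so a plain clone needs git lfs pull to materialize them). A sketch for checking a downloaded file against its pointer, using the oid and size values above:

import hashlib
import os

def verify_lfs_object(path, expected_sha256, expected_size):
    # An LFS pointer's oid is the SHA-256 of the full payload; size is in bytes.
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read 1 MiB at a time
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

print(verify_lfs_object(
    "models/checkpoints/rcnzCartoon3d_v10.safetensors",
    "a6b4c0392d7486bfa4fd1a31c7b7d2679f743f8ea8d9f219c82b5c33db31ddb9",
    2132625644,
))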