Fucius committed on
Commit
7b57790
1 Parent(s): 22dfda6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -84
app.py CHANGED
@@ -3,15 +3,13 @@ import sys
3
  import os
4
 
5
 
6
- print(os.system(f"pwd"))
7
 
8
-
9
- os.system(f"git clone https://github.com/Curt-Park/yolo-world-with-efficientvit-sam.git")
10
- cwd0 = os.getcwd()
11
- cwd1 = os.path.join(cwd0, "yolo-world-with-efficientvit-sam")
12
- os.chdir(cwd1)
13
- os.system("make setup")
14
- os.system(f"cd /home/user/app")
15
 
16
  sys.path.append('./')
17
  import gradio as gr
@@ -29,7 +27,7 @@ from diffusers.utils import load_image
29
  import cv2
30
  from PIL import Image, ImageOps
31
  from transformers import DPTFeatureExtractor, DPTForDepthEstimation
32
- # from controlnet_aux import OpenposeDetector
33
  from controlnet_aux.open_pose.body import Body
34
 
35
  try:
@@ -307,10 +305,10 @@ def resize_and_center_crop(image, output_size=(1024, 576)):
307
  def main(device, segment_type):
308
  pipe, controller, pipe_concept = build_model_sd(args.pretrained_sdxl_model, args.openpose_checkpoint, device, prompts_tmp)
309
 
310
- if segment_type == 'GroundingDINO':
311
- detect_model, sam = build_dino_segment_model(args.dino_checkpoint, args.sam_checkpoint)
312
- else:
313
- detect_model, sam = build_yolo_segment_model(args.efficientViT_checkpoint, device)
314
 
315
  resolution_list = ["1440*728",
316
  "1344*768",
@@ -498,77 +496,87 @@ def main(device, segment_type):
498
  def get_local_value_woman(input):
499
  return character_woman[input][0]
500
 
501
-
502
- with gr.Blocks(css=css) as demo:
503
- # description
504
- gr.Markdown(title)
505
- gr.Markdown(description)
506
-
507
- with gr.Row():
508
- gallery = gr.Image(label="Generated Images", height=512, width=512)
509
- gen_condition = gr.Image(label="Spatial Condition", height=512, width=512)
510
- usage_tips = gr.Markdown(label="Usage tips of OMG", value=tips, visible=False)
511
-
512
- with gr.Row():
513
- condition_img1 = gr.Image(label="Input an RGB image for condition", height=128, width=128)
514
-
515
- # character choose
516
- with gr.Row():
517
- man = gr.Dropdown(label="Character 1 selection", choices=CHARACTER_MAN_NAMES, value="Chris Evans (identifier: Chris Evans)")
518
- woman = gr.Dropdown(label="Character 2 selection", choices=CHARACTER_WOMAN_NAMES, value="Taylor Swift (identifier: TaylorSwift)")
519
- resolution = gr.Dropdown(label="Image Resolution (width*height)", choices=resolution_list, value="1024*1024")
520
- condition = gr.Dropdown(label="Input condition type", choices=condition_list, value="None")
521
- style = gr.Dropdown(label="style", choices=STYLE_NAMES, value="None")
522
-
523
- with gr.Row():
524
- local_prompt1 = gr.Textbox(label="Character1_prompt",
525
- info="Describe the Character 1, this prompt should include the identifier of character 1",
526
- value="Close-up photo of the Chris Evans, 35mm photograph, film, professional, 4k, highly detailed.")
527
- local_prompt2 = gr.Textbox(label="Character2_prompt",
528
- info="Describe the Character 2, this prompt should include the identifier of character2",
529
- value="Close-up photo of the TaylorSwift, 35mm photograph, film, professional, 4k, highly detailed.")
530
-
531
- man.change(get_local_value_man, man, local_prompt1)
532
- woman.change(get_local_value_woman, woman, local_prompt2)
533
-
534
- # prompt
535
- with gr.Column():
536
- prompt = gr.Textbox(label="Prompt 1",
537
- info="Give a simple prompt to describe the first image content",
538
- placeholder="Required",
539
- value="close-up shot, photography, a man and a woman on the street, facing the camera smiling")
540
-
541
-
542
- with gr.Accordion(open=False, label="Advanced Options"):
543
- seed = gr.Slider(
544
- label="Seed",
545
- minimum=0,
546
- maximum=MAX_SEED,
547
- step=1,
548
- value=42,
549
- )
550
- negative_prompt = gr.Textbox(label="Negative Prompt",
551
- placeholder="noisy, blurry, soft, deformed, ugly",
552
- value="noisy, blurry, soft, deformed, ugly")
553
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
554
-
555
- submit = gr.Button("Submit", variant="primary")
556
-
557
- submit.click(
558
- fn=remove_tips,
559
- outputs=usage_tips,
560
- ).then(
561
- fn=randomize_seed_fn,
562
- inputs=[seed, randomize_seed],
563
- outputs=seed,
564
- queue=False,
565
- api_name=False,
566
- ).then(
567
- fn=generate_image,
568
- inputs=[prompt, negative_prompt, man, woman, resolution, local_prompt1, local_prompt2, seed, condition, condition_img1, style],
569
- outputs=[gallery, gen_condition]
570
- )
571
- demo.launch(share=True)
 
 
 
 
 
 
 
 
 
 
572
 
573
  def parse_args():
574
  parser = argparse.ArgumentParser('', add_help=False)
 
3
  import os
4
 
5
 
 
6
 
7
+ # os.system(f"git clone https://github.com/Curt-Park/yolo-world-with-efficientvit-sam.git")
8
+ # cwd0 = os.getcwd()
9
+ # cwd1 = os.path.join(cwd0, "yolo-world-with-efficientvit-sam")
10
+ # os.chdir(cwd1)
11
+ # os.system("make setup")
12
+ # os.system(f"cd /home/user/app")
 
13
 
14
  sys.path.append('./')
15
  import gradio as gr
 
27
  import cv2
28
  from PIL import Image, ImageOps
29
  from transformers import DPTFeatureExtractor, DPTForDepthEstimation
30
+ from controlnet_aux import OpenposeDetector
31
  from controlnet_aux.open_pose.body import Body
32
 
33
  try:
 
305
  def main(device, segment_type):
306
  pipe, controller, pipe_concept = build_model_sd(args.pretrained_sdxl_model, args.openpose_checkpoint, device, prompts_tmp)
307
 
308
+ # if segment_type == 'GroundingDINO':
309
+ # detect_model, sam = build_dino_segment_model(args.dino_checkpoint, args.sam_checkpoint)
310
+ # else:
311
+ # detect_model, sam = build_yolo_segment_model(args.efficientViT_checkpoint, device)
312
 
313
  resolution_list = ["1440*728",
314
  "1344*768",
 
496
  def get_local_value_woman(input):
497
  return character_woman[input][0]
498
 
499
+ @spaces.GPU
500
+ def generate(prompt):
501
+ print(os.system(f"pwd"))
502
+ return pipe(prompt).images
503
+
504
+ gr.Interface(
505
+ fn=generate,
506
+ inputs=gr.Text(),
507
+ outputs=gr.Gallery(),
508
+ ).launch()
509
+
510
+ # with gr.Blocks(css=css) as demo:
511
+ # # description
512
+ # gr.Markdown(title)
513
+ # gr.Markdown(description)
514
+ #
515
+ # with gr.Row():
516
+ # gallery = gr.Image(label="Generated Images", height=512, width=512)
517
+ # gen_condition = gr.Image(label="Spatial Condition", height=512, width=512)
518
+ # usage_tips = gr.Markdown(label="Usage tips of OMG", value=tips, visible=False)
519
+ #
520
+ # with gr.Row():
521
+ # condition_img1 = gr.Image(label="Input an RGB image for condition", height=128, width=128)
522
+ #
523
+ # # character choose
524
+ # with gr.Row():
525
+ # man = gr.Dropdown(label="Character 1 selection", choices=CHARACTER_MAN_NAMES, value="Chris Evans (identifier: Chris Evans)")
526
+ # woman = gr.Dropdown(label="Character 2 selection", choices=CHARACTER_WOMAN_NAMES, value="Taylor Swift (identifier: TaylorSwift)")
527
+ # resolution = gr.Dropdown(label="Image Resolution (width*height)", choices=resolution_list, value="1024*1024")
528
+ # condition = gr.Dropdown(label="Input condition type", choices=condition_list, value="None")
529
+ # style = gr.Dropdown(label="style", choices=STYLE_NAMES, value="None")
530
+ #
531
+ # with gr.Row():
532
+ # local_prompt1 = gr.Textbox(label="Character1_prompt",
533
+ # info="Describe the Character 1, this prompt should include the identifier of character 1",
534
+ # value="Close-up photo of the Chris Evans, 35mm photograph, film, professional, 4k, highly detailed.")
535
+ # local_prompt2 = gr.Textbox(label="Character2_prompt",
536
+ # info="Describe the Character 2, this prompt should include the identifier of character2",
537
+ # value="Close-up photo of the TaylorSwift, 35mm photograph, film, professional, 4k, highly detailed.")
538
+ #
539
+ # man.change(get_local_value_man, man, local_prompt1)
540
+ # woman.change(get_local_value_woman, woman, local_prompt2)
541
+ #
542
+ # # prompt
543
+ # with gr.Column():
544
+ # prompt = gr.Textbox(label="Prompt 1",
545
+ # info="Give a simple prompt to describe the first image content",
546
+ # placeholder="Required",
547
+ # value="close-up shot, photography, a man and a woman on the street, facing the camera smiling")
548
+ #
549
+ #
550
+ # with gr.Accordion(open=False, label="Advanced Options"):
551
+ # seed = gr.Slider(
552
+ # label="Seed",
553
+ # minimum=0,
554
+ # maximum=MAX_SEED,
555
+ # step=1,
556
+ # value=42,
557
+ # )
558
+ # negative_prompt = gr.Textbox(label="Negative Prompt",
559
+ # placeholder="noisy, blurry, soft, deformed, ugly",
560
+ # value="noisy, blurry, soft, deformed, ugly")
561
+ # randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
562
+ #
563
+ # submit = gr.Button("Submit", variant="primary")
564
+ #
565
+ # submit.click(
566
+ # fn=remove_tips,
567
+ # outputs=usage_tips,
568
+ # ).then(
569
+ # fn=randomize_seed_fn,
570
+ # inputs=[seed, randomize_seed],
571
+ # outputs=seed,
572
+ # queue=False,
573
+ # api_name=False,
574
+ # ).then(
575
+ # fn=generate_image,
576
+ # inputs=[prompt, negative_prompt, man, woman, resolution, local_prompt1, local_prompt2, seed, condition, condition_img1, style],
577
+ # outputs=[gallery, gen_condition]
578
+ # )
579
+ # demo.launch(share=True)
580
 
581
  def parse_args():
582
  parser = argparse.ArgumentParser('', add_help=False)