Fucius committed on
Commit
78ed006
1 Parent(s): 594c636

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -17
app.py CHANGED
@@ -6,12 +6,7 @@ torch.jit.script = lambda f: f
6
  import timm
7
 
8
 
9
- # os.system(f"git clone https://github.com/Curt-Park/yolo-world-with-efficientvit-sam.git")
10
- # cwd0 = os.getcwd()
11
- # cwd1 = os.path.join(cwd0, "yolo-world-with-efficientvit-sam")
12
- # os.chdir(cwd1)
13
- # os.system("make setup")
14
- # os.system(f"cd /home/user/app")
15
 
16
  sys.path.append('./')
17
  import gradio as gr
@@ -30,7 +25,7 @@ import cv2
30
  from PIL import Image, ImageOps
31
  from transformers import DPTFeatureExtractor, DPTForDepthEstimation
32
  from controlnet_aux import OpenposeDetector
33
- # from controlnet_aux.open_pose.body import Body
34
 
35
 
36
  from inference.models import YOLOWorld
@@ -497,16 +492,76 @@ def main(device, segment_type):
497
  def get_local_value_woman(input):
498
  return character_woman[input][0]
499
 
500
- @spaces.GPU
501
- def generate(prompt):
502
- print(os.system(prompt))
503
- return prompt
504
-
505
- gr.Interface(
506
- fn=generate,
507
- inputs=gr.Text(),
508
- outputs=gr.Gallery(),
509
- ).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
510
 
511
 
512
 
 
6
  import timm
7
 
8
 
9
+
 
 
 
 
 
10
 
11
  sys.path.append('./')
12
  import gradio as gr
 
25
  from PIL import Image, ImageOps
26
  from transformers import DPTFeatureExtractor, DPTForDepthEstimation
27
  from controlnet_aux import OpenposeDetector
28
+ from controlnet_aux.open_pose.body import Body
29
 
30
 
31
  from inference.models import YOLOWorld
 
492
  def get_local_value_woman(input):
493
  return character_woman[input][0]
494
 
495
+ with gr.Blocks(css=css) as demo:
496
+ # description
497
+ gr.Markdown(title)
498
+ gr.Markdown(description)
499
+
500
+ with gr.Row():
501
+ gallery = gr.Image(label="Generated Images", height=512, width=512)
502
+ gen_condition = gr.Image(label="Spatial Condition", height=512, width=512)
503
+ usage_tips = gr.Markdown(label="Usage tips of OMG", value=tips, visible=False)
504
+
505
+ with gr.Row():
506
+ condition_img1 = gr.Image(label="Input an RGB image for condition", height=128, width=128)
507
+
508
+ # character choose
509
+ with gr.Row():
510
+ man = gr.Dropdown(label="Character 1 selection", choices=CHARACTER_MAN_NAMES, value="Chris Evans (identifier: Chris Evans)")
511
+ woman = gr.Dropdown(label="Character 2 selection", choices=CHARACTER_WOMAN_NAMES, value="Taylor Swift (identifier: TaylorSwift)")
512
+ resolution = gr.Dropdown(label="Image Resolution (width*height)", choices=resolution_list, value="1024*1024")
513
+ condition = gr.Dropdown(label="Input condition type", choices=condition_list, value="None")
514
+ style = gr.Dropdown(label="style", choices=STYLE_NAMES, value="None")
515
+
516
+ with gr.Row():
517
+ local_prompt1 = gr.Textbox(label="Character1_prompt",
518
+ info="Describe the Character 1, this prompt should include the identifier of character 1",
519
+ value="Close-up photo of the Chris Evans, 35mm photograph, film, professional, 4k, highly detailed.")
520
+ local_prompt2 = gr.Textbox(label="Character2_prompt",
521
+ info="Describe the Character 2, this prompt should include the identifier of character2",
522
+ value="Close-up photo of the TaylorSwift, 35mm photograph, film, professional, 4k, highly detailed.")
523
+
524
+ man.change(get_local_value_man, man, local_prompt1)
525
+ woman.change(get_local_value_woman, woman, local_prompt2)
526
+
527
+ # prompt
528
+ with gr.Column():
529
+ prompt = gr.Textbox(label="Prompt 1",
530
+ info="Give a simple prompt to describe the first image content",
531
+ placeholder="Required",
532
+ value="close-up shot, photography, a man and a woman on the street, facing the camera smiling")
533
+
534
+
535
+ with gr.Accordion(open=False, label="Advanced Options"):
536
+ seed = gr.Slider(
537
+ label="Seed",
538
+ minimum=0,
539
+ maximum=MAX_SEED,
540
+ step=1,
541
+ value=42,
542
+ )
543
+ negative_prompt = gr.Textbox(label="Negative Prompt",
544
+ placeholder="noisy, blurry, soft, deformed, ugly",
545
+ value="noisy, blurry, soft, deformed, ugly")
546
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
547
+
548
+ submit = gr.Button("Submit", variant="primary")
549
+
550
+ submit.click(
551
+ fn=remove_tips,
552
+ outputs=usage_tips,
553
+ ).then(
554
+ fn=randomize_seed_fn,
555
+ inputs=[seed, randomize_seed],
556
+ outputs=seed,
557
+ queue=False,
558
+ api_name=False,
559
+ ).then(
560
+ fn=generate_image,
561
+ inputs=[prompt, negative_prompt, man, woman, resolution, local_prompt1, local_prompt2, seed, condition, condition_img1, style],
562
+ outputs=[gallery, gen_condition]
563
+ )
564
+ demo.launch(share=True)
565
 
566
 
567