svjack committed
Commit 9ee1495
1 Parent(s): 0ccfc0a

Update app.py

Files changed (1)
  1. app.py +25 -11
app.py CHANGED
@@ -44,7 +44,7 @@ prompt_df = pd.read_csv("Stable-Diffusion-Prompts.csv")
 #DEFAULT_PROMPT = "1girl, aqua eyes, baseball cap, blonde hair, closed mouth, earrings, green background, hat, hoop earrings, jewelry, looking at viewer, shirt, short hair, simple background, solo, upper body, yellow shirt"
 DEFAULT_PROMPT = "X go to Istanbul"
 DEFAULT_ROLE = "Superman"
-DEFAULT_BOOK_COVER = "book_cover_dir/JMW_Turner_-_Nantes_from_the_Ile_Feydeau.jpg"
+DEFAULT_BOOK_COVER = "book_cover_dir/Blank.jpg"
 
 hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
 
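Note: the default cover switches from the Turner painting to book_cover_dir/Blank.jpg. A minimal fallback sketch, not part of this commit, in case the assumed Blank.jpg file is ever missing from the repository:

    # Sketch only: fall back to the first available cover if Blank.jpg is absent.
    import pathlib

    DEFAULT_BOOK_COVER = "book_cover_dir/Blank.jpg"
    if not pathlib.Path(DEFAULT_BOOK_COVER).exists():
        candidates = sorted(pathlib.Path("book_cover_dir").glob("*.jpg"))
        DEFAULT_BOOK_COVER = str(candidates[0]) if candidates else None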
@@ -422,19 +422,20 @@ def style_transfer_func(content_img, style_img, style_transfer_client = style_tr
     os.remove(style_im_name)
     return Image.open(out)
 '''
-def style_transfer_func(content_img, style_img):
+def style_transfer_func(content_img, style_img, super_resolution_type = "none"):
     assert hasattr(content_img, "save")
     assert hasattr(style_img, "save")
     content_image_input = np.asarray(content_img)
     style_image_input = np.asarray(style_img)
-    out = perform_neural_transfer(content_image_input, style_image_input, super_resolution_type = "none")
+    out = perform_neural_transfer(content_image_input, style_image_input, super_resolution_type = super_resolution_type)
     assert hasattr(out, "save")
     return out
 
 
 def gen_images_from_event_fact(current_model, event_fact = DEFAULT_PROMPT, role_name = DEFAULT_ROLE,
-    style_pic = None
+    style_pic = None, super_resolution_type = "SD(Standard Definition)"
     ):
+    assert super_resolution_type in ["SD(Standard Definition)", "HD(High Definition)"]
     event_reasoning_dict = produce_4_event(event_fact)
     caption_list ,event_reasoning_sd_list = transform_4_event_as_sd_prompts(event_fact ,
         event_reasoning_dict,
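Note: style_transfer_func now exposes super_resolution_type and forwards it to perform_neural_transfer instead of hard-coding "none". A usage sketch inside app.py; content.jpg and style.jpg are hypothetical inputs, and the two values shown are the ones this diff actually passes:

    from PIL import Image

    content = Image.open("content.jpg")  # hypothetical content frame
    style = Image.open("style.jpg")      # hypothetical style reference

    sd_frame = style_transfer_func(content, style)                                   # default "none"
    hd_frame = style_transfer_func(content, style, super_resolution_type = "anime")  # value used for the HD path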
@@ -451,9 +452,14 @@ def gen_images_from_event_fact(current_model, event_fact = DEFAULT_PROMPT, role_
     print("perform styling.....")
     img_list_ = []
     for x in tqdm(img_list):
-        img_list_.append(
-            style_transfer_func(x, style_pic)
-        )
+        if super_resolution_type == "SD(Standard Definition)":
+            img_list_.append(
+                style_transfer_func(x, style_pic, super_resolution_type = "none")
+            )
+        else:
+            img_list_.append(
+                style_transfer_func(x, style_pic, super_resolution_type = "anime")
+            )
     img_list = img_list_
     img_list = list(map(lambda t2: add_caption_on_image(t2[0], t2[1]) ,zip(*[img_list, caption_list])))
     img_mid = img_list[2]
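Note: the SD/HD branch differs only in the super_resolution_type value handed to style_transfer_func. An equivalent, more compact form of the loop (behavior unchanged, assuming only the two radio labels ever reach this function):

    # Map the radio label to the value perform_neural_transfer expects.
    _SR_BY_QUALITY = {
        "SD(Standard Definition)": "none",
        "HD(High Definition)": "anime",
    }
    for x in tqdm(img_list):
        img_list_.append(
            style_transfer_func(x, style_pic,
                                super_resolution_type = _SR_BY_QUALITY[super_resolution_type])
        )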
@@ -466,7 +472,9 @@ def gen_images_from_event_fact(current_model, event_fact = DEFAULT_PROMPT, role_
 
 def image_click(images, evt: gr.SelectData,
     ):
-    img_selected = images[evt.index]["name"]
+    #print(images)
+    #print(evt.index)
+    img_selected = images[evt.index][0]["name"]
     #print(img_selected)
     return img_selected
 
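Note: the extra [0] matches the gallery entries now being (path, caption) tuples produced by get_book_covers(), so the file record sits in the first slot. A slightly more defensive sketch of the handler, assuming an entry is either a plain file dict or such a tuple:

    def image_click(images, evt: gr.SelectData):
        entry = images[evt.index]
        if isinstance(entry, (list, tuple)):  # (file, caption) pair from the gallery
            entry = entry[0]
        return entry["name"]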
@@ -475,10 +483,13 @@ def get_book_covers():
         list(pathlib.Path("book_cover_dir").rglob("*.jpg")) + \
         list(pathlib.Path("book_cover_dir").rglob("*.png")) + \
         list(pathlib.Path("book_cover_dir").rglob("*.jpeg"))
-    ).map(str).map(lambda x: np.nan if x.split("/")[-1].startswith("_") else x).dropna().values.tolist()
+    ).map(str).map(lambda x: np.nan if x.split("/")[-1].startswith("_") else x).dropna().map(
+        lambda x: (x, "".join(x.split(".")[:-1]).split("/")[-1])
+    ).values.tolist()
+    covers = sorted(covers, key = lambda t2: int(DEFAULT_BOOK_COVER in t2[0]), reverse = True)
     return covers
 
-with gr.Blocks() as demo:
+with gr.Blocks(css=".caption-label {display:none}") as demo:
     favicon = '<img src="" width="48px" style="display: inline">'
     gr.Markdown(
         f"""<h1><center>🌻{favicon} AI Diffusion</center></h1>
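Note: get_book_covers() now returns (path, caption) tuples, still skips files whose name starts with "_", and sorts so the default cover comes first (int(DEFAULT_BOOK_COVER in t2[0]) is 1 only for the default). A plain-pathlib equivalent sketch without the pandas .map chain; for single-extension file names the caption matches the lambda in the diff:

    import pathlib

    def get_book_covers_plain():
        paths = [
            str(p) for ext in ("*.jpg", "*.png", "*.jpeg")
            for p in pathlib.Path("book_cover_dir").rglob(ext)
            if not p.name.startswith("_")
        ]
        covers = [(p, pathlib.Path(p).stem) for p in paths]
        return sorted(covers, key = lambda t2: DEFAULT_BOOK_COVER in t2[0], reverse = True)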
@@ -499,6 +510,9 @@ with gr.Blocks() as demo:
         value = DEFAULT_BOOK_COVER,
         interactive = True,
     )
+    super_resolution_type = gr.Radio(choices = ["SD(Standard Definition)" ,"HD(High Definition)"],
+        value="SD(Standard Definition)", label="Story Video Quality",
+        interactive = True)
 
     with gr.Row():
         text_prompt = gr.Textbox(label="Prompt", placeholder="a cute dog", lines=1, elem_id="prompt-text-input", value = DEFAULT_PROMPT)
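Note: the new gr.Radio passes its selected label to the handler as a plain string, which is why gen_images_from_event_fact asserts on the two exact label texts. A minimal standalone sketch (independent of this app) showing how the radio value reaches a callback:

    import gradio as gr

    def describe(quality):
        # quality arrives as the selected label string
        return f"Selected story video quality: {quality}"

    with gr.Blocks() as _sketch:
        quality = gr.Radio(choices = ["SD(Standard Definition)", "HD(High Definition)"],
                           value = "SD(Standard Definition)", label = "Story Video Quality")
        out = gr.Textbox()
        quality.change(describe, inputs = quality, outputs = out)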
@@ -515,7 +529,7 @@ with gr.Blocks() as demo:
         image_click, style_reference_input_gallery, style_reference_input_image
     )
 
-    text_button.click(gen_images_from_event_fact, inputs=[current_model, text_prompt, role_name, style_reference_input_image],
+    text_button.click(gen_images_from_event_fact, inputs=[current_model, text_prompt, role_name, style_reference_input_image, super_resolution_type],
                       outputs=video_output)
 
     #select_button.click(generate_txt2img, inputs=[current_model, select_prompt, negative_prompt, image_style], outputs=image_output)
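Note: the inputs list is positional, so its order must mirror gen_images_from_event_fact(current_model, event_fact, role_name, style_pic, super_resolution_type). A quick local sanity check one could run (hypothetical, not in the commit):

    import inspect

    expected = ["current_model", "event_fact", "role_name", "style_pic", "super_resolution_type"]
    assert list(inspect.signature(gen_images_from_event_fact).parameters) == expected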