Leyo (HF staff) committed
Commit a31e3c6
Parent: ef7d498

allow more lines to be written

Files changed (1): app_dialogue.py (+70, -75)

app_dialogue.py CHANGED
@@ -356,6 +356,41 @@ def resize_with_ratio(image: PIL.Image.Image, fixed_width: int) -> PIL.Image.Image:
     return resized_img
 
 
+def make_new_lines(draw, image, font, text_is_too_long, lines, num_lines, num_loops):
+    max_len_increment = 0
+    while text_is_too_long and max_len_increment < 10:
+        new_lines = lines.copy()
+        last_line_with_backslash = insert_backslash(
+            new_lines[-1],
+            max_length=(len(new_lines[-1]) + max_len_increment)
+            // (num_lines - num_loops),
+        )
+        penultimate_line, last_line = (
+            last_line_with_backslash.split("\n")[0],
+            last_line_with_backslash.split("\n")[1],
+        )
+        new_lines.pop(-1)
+        new_lines.append(penultimate_line)
+        new_lines.append(last_line)
+        # If we haven't reached the last line, we split it again
+        if len(new_lines) < num_lines:
+            new_lines, text_width, text_is_too_long = make_new_lines(
+                draw=draw,
+                image=image,
+                font=font,
+                text_is_too_long=text_is_too_long,
+                lines=new_lines,
+                num_lines=num_lines,
+                num_loops=num_loops + 1,
+            )
+        text_width = max([draw.textlength(line, font) for line in new_lines])
+        text_is_too_long = text_width > image.width
+        max_len_increment += 1
+        if not text_is_too_long:
+            lines = new_lines
+    return lines, text_width, text_is_too_long
+
+
 def test_font_size(
     draw,
     image,
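Note on the new helper: make_new_lines keeps re-splitting the current last line (via insert_backslash, which lives elsewhere in app_dialogue.py and is not shown in this diff) until num_lines lines exist, widens max_length a little on each retry, and accepts the split once the widest line fits the image. The sketch below isolates just the splitting step; the insert_backslash stand-in (break at the last space before max_length) and the helper name split_last_line are assumptions for illustration, and the PIL width check is left out so it runs standalone.

# Stand-in for insert_backslash (assumption: the real helper is not part of this diff).
# It breaks a string at the last space before max_length and rejoins with "\n".
def insert_backslash(text, max_length):
    cut = text.rfind(" ", 0, max_length + 1)
    if cut <= 0:
        cut = max_length
    return text[:cut].rstrip() + "\n" + text[cut:].lstrip()


# Hypothetical helper mirroring make_new_lines' splitting step, minus the PIL
# width measurement: keep splitting the last line until num_lines is reached.
def split_last_line(lines, num_lines):
    lines = list(lines)
    while len(lines) < num_lines:
        remaining = num_lines - len(lines) + 1
        head = insert_backslash(lines[-1], max_length=len(lines[-1]) // remaining)
        lines[-1:] = head.split("\n")
    return lines


print(split_last_line(["when the caption is way too long for one line"], num_lines=3))
# ['when the', 'caption is way too', 'long for one line']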
@@ -368,75 +403,25 @@ def test_font_size(
 ):
     text_width = draw.textlength(text, font)
     text_is_too_long = True
-
-    if num_lines == 1:
-        while font.size > min_font and text_is_too_long:
-            font = ImageFont.truetype(
-                f"fonts/{font_meme_text}.ttf", size=font.size - font_size_reduction
-            )
+    lines = [text]
+    while font.size > min_font and text_is_too_long:
+        font = ImageFont.truetype(
+            f"fonts/{font_meme_text}.ttf", size=font.size - font_size_reduction
+        )
+        if num_lines == 1:
             text_width = draw.textlength(text, font)
             text_is_too_long = text_width > image.width
-
-    elif num_lines == 2:
-        while font.size > min_font and text_is_too_long:
-            font = ImageFont.truetype(
-                f"fonts/{font_meme_text}.ttf", size=font.size - font_size_reduction
+        else:
+            lines, text_width, text_is_too_long = make_new_lines(
+                draw=draw,
+                image=image,
+                font=font,
+                text_is_too_long=text_is_too_long,
+                lines=lines,
+                num_lines=num_lines,
+                num_loops=0,
             )
-            max_len_increment = 0
-            while (
-                text_is_too_long
-                and max_len_increment < 10
-                and max_len_increment < (len(text)) // 2
-            ):
-                temp_text = insert_backslash(
-                    text, max_length=(len(text) + max_len_increment) // 2
-                )
-                first_line, second_line = (
-                    temp_text.split("\n")[0],
-                    temp_text.split("\n")[1],
-                )
-                text_width = max(
-                    draw.textlength(first_line, font),
-                    draw.textlength(second_line, font),
-                )
-                text_is_too_long = text_width > image.width
-                max_len_increment += 1
-
-    elif num_lines == 3:
-        while font.size > min_font and text_is_too_long:
-            font = ImageFont.truetype(
-                f"fonts/{font_meme_text}.ttf", size=font.size - font_size_reduction
-            )
-            max_len_incr_1_split = 0
-            while text_is_too_long and max_len_incr_1_split < 10:
-                first_temp_text = insert_backslash(
-                    text, max_length=(len(text) + max_len_incr_1_split) // 3
-                )
-                first_line, second_line = (
-                    first_temp_text.split("\n")[0],
-                    first_temp_text.split("\n")[1],
-                )
-                max_len_incr_2_split = 0
-                while text_is_too_long and max_len_incr_2_split < 10:
-                    temp_text_second_line = insert_backslash(
-                        second_line,
-                        max_length=(len(second_line) + max_len_incr_2_split) // 2,
-                    )
-                    second_line_1, second_line_2 = (
-                        temp_text_second_line.split("\n")[0],
-                        temp_text_second_line.split("\n")[1],
-                    )
-                    temp_text = first_line + "\n" + second_line_1 + "\n" + second_line_2
-                    text_width = max(
-                        draw.textlength(first_line, font),
-                        draw.textlength(second_line_1, font),
-                        draw.textlength(second_line_2, font),
-                    )
-                    text_is_too_long = text_width > image.width
-                    max_len_incr_2_split += 1
-                max_len_incr_1_split += 1
-    else:
-        raise (ValueError("num_lines can only be 1, 2 or 3"))
+        temp_text = "\n".join(lines)
 
     if not text_is_too_long and num_lines > 1:
         text = temp_text
@@ -471,7 +456,7 @@ def make_meme_image(
     initial_font_size = 80
     text_is_too_long = True
     num_lines = 0
-    while text_is_too_long and num_lines < 3:
+    while text_is_too_long and num_lines < 8:
        num_lines += 1
        font = ImageFont.truetype(f"fonts/{font_meme_text}.ttf", size=initial_font_size)
        text, font, text_width, text_is_too_long = test_font_size(
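Note: combined with the rewritten test_font_size above, the escalation now allows up to 8 lines instead of 3. For each candidate line count the font shrinks from initial_font_size toward min_font, and only when the text still overflows does this loop grant one more line. A condensed sketch of that strategy, using hypothetical names (pick_layout, split_evenly, caption_fits) and a rough character-count width model in place of PIL's pixel measurement:

def split_evenly(text, num_lines):
    # Greedy word wrap into roughly equal lines (illustrative, not the app's splitter).
    words, lines = text.split(), []
    per_line = max(1, len(text) // num_lines)
    current = ""
    for word in words:
        too_wide = current and len(current) + 1 + len(word) > per_line
        if too_wide and len(lines) < num_lines - 1:
            lines.append(current)
            current = word
        else:
            current = f"{current} {word}".strip()
    lines.append(current)
    return lines


def caption_fits(lines, font_size, image_width, px_per_char=0.6):
    # Assumed width model: each character is roughly px_per_char * font_size pixels wide.
    return max(len(line) for line in lines) * font_size * px_per_char <= image_width


def pick_layout(text, image_width, initial_font=80, min_font=20, step=4, max_lines=8):
    # Mirrors the loop in make_meme_image: shrink the font first, and only move
    # on to one more line when the smallest allowed font still overflows.
    for num_lines in range(1, max_lines + 1):
        lines = split_evenly(text, num_lines)
        font_size = initial_font
        while font_size > min_font:
            if caption_fits(lines, font_size, image_width):
                return lines, font_size
            font_size -= step
    return split_evenly(text, max_lines), min_font


print(pick_layout("an extremely long caption that would never fit on a single line", 600))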
@@ -494,7 +479,7 @@ def make_meme_image(
 
     outline_width = 2
     text_x = (image_width - text_width) / 2
-    text_y = image_height - num_lines * font.size - 10 - num_lines
+    text_y = image_height - num_lines * font.size - 10 - 2 * num_lines
     if text_at_the_top:
         text_y = 0
 
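Note: the vertical offset now subtracts 2 * num_lines instead of num_lines, so multi-line captions start slightly higher and keep a bit more margin at the bottom edge. A quick check with made-up numbers (not values from the app):

image_height, num_lines, font_size = 500, 3, 40  # illustrative values only
text_y = image_height - num_lines * font_size - 10 - 2 * num_lines
print(text_y)  # 364; the previous formula ("- num_lines") would give 367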
 
@@ -583,16 +568,16 @@ chatbot = gr.Chatbot(
     visible=False,
     avatar_images=[None, BOT_AVATAR],
 )
-css='''
+css = """
 .gradio-container{max-width: 970px!important}
 h1{display: flex;align-items: center;justify-content: center;gap: .25em}
-'''
+"""
 with gr.Blocks(title="AI Meme Generator", theme=gr.themes.Base(), css=css) as demo:
     with gr.Row(scale=0.5):
         gr.HTML(
             """<h1 align="center">AI Meme Generator <span style="font-size: 13px;">powered by <a href="https://huggingface.co/blog/idefics">IDEFICS</a></span><img width=40 height=40 src="https://cdn-uploads.huggingface.co/production/uploads/624bebf604abc7ebb01789af/v770xGti5vH1SYLBgyOO_.png" /></h1>"""
         )
-
+
     with gr.Row(elem_id="model_selector_row"):
         model_selector = gr.Dropdown(
             choices=MODELS,
@@ -896,7 +881,9 @@ with gr.Blocks(title="AI Meme Generator", theme=gr.themes.Base(), css=css) as demo:
                 full_text += acc_text
                 acc_text = ""
 
-    textbox.submit(fn=lambda: "", inputs=[], outputs=[generated_memes_gallery], queue=False).then(
+    textbox.submit(
+        fn=lambda: "", inputs=[], outputs=[generated_memes_gallery], queue=False
+    ).then(
         fn=model_inference,
         inputs=[
             model_selector,
@@ -915,8 +902,14 @@ with gr.Blocks(title="AI Meme Generator", theme=gr.themes.Base(), css=css) as demo:
         ],
         outputs=[textbox, generated_memes_gallery, chatbot],
     )
-    imagebox.upload(fn=lambda: "", inputs=[], outputs=[generated_memes_gallery], queue=False).then(
-        fn=lambda: "Write a meme about this image.", inputs=[], outputs=[textbox], queue=False).then(
+    imagebox.upload(
+        fn=lambda: "", inputs=[], outputs=[generated_memes_gallery], queue=False
+    ).then(
+        fn=lambda: "Write a meme about this image.",
+        inputs=[],
+        outputs=[textbox],
+        queue=False,
+    ).then(
         fn=model_inference,
         inputs=[
             model_selector,
@@ -939,7 +932,9 @@ with gr.Blocks(title="AI Meme Generator", theme=gr.themes.Base(), css=css) as demo:
             chatbot,
         ],
     )
-    submit_btn.click(fn=lambda: "", inputs=[], outputs=[generated_memes_gallery], queue=False).then(
+    submit_btn.click(
+        fn=lambda: "", inputs=[], outputs=[generated_memes_gallery], queue=False
+    ).then(
         fn=model_inference,
         inputs=[
             model_selector,
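Note: the last three hunks only reflow the existing Gradio event chains across multiple lines; the behaviour is unchanged: clear the gallery first (unqueued), optionally pre-fill the textbox, then run model_inference via .then(). A minimal, self-contained sketch of that chaining pattern with hypothetical components (not the app's real ones):

import gradio as gr


def fake_inference(prompt):
    # Placeholder standing in for the app's model_inference (assumption).
    return f"meme for: {prompt}"


with gr.Blocks() as sketch:
    prompt = gr.Textbox(label="prompt")
    result = gr.Textbox(label="result")
    # Same pattern as textbox.submit above: clear the output without queueing,
    # then chain the slower inference step with .then().
    prompt.submit(
        fn=lambda: "", inputs=[], outputs=[result], queue=False
    ).then(
        fn=fake_inference, inputs=[prompt], outputs=[result]
    )

# sketch.launch()  # uncomment to try it locally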
 