magicfixeseverything committed on
Commit 3f5ca95
1 Parent(s): a465bc2

Upload spaghetti_ai_script.py

Files changed (1)
  1. spaghetti_ai_script.py +60 -28
spaghetti_ai_script.py CHANGED
@@ -428,22 +428,20 @@ allow_other_model_versions = 1
 #
 
 enable_image_preview = 1
-image_preview_step_interval = 5 #10
+image_preview_step_interval = 10
 image_preview_seconds_interval = 30
 load_image_preview_frequency_in_seconds = 2
 delete_preview_images_immediately = 1
 
-default_create_preview_images = 1 # 1
-default_do_not_create_refining_preview_images = 0 # 0
-default_do_not_create_upscaling_preview_images = 0 # 1
+default_create_preview_images = 1
+default_do_not_create_refining_preview_images = 0
+default_do_not_create_upscaling_preview_images = 1
 
 ####################
 
 #
 # Allow Longer Prompts for Stable Diffusion 1.5 Based Models
 #
-# https://github.com/huggingface/diffusers/issues/2136#issuecomment-1514969011
-#
 
 enable_longer_prompts = 1
 
@@ -3637,7 +3635,7 @@ def create_preview_image (
 #####################
 
 def get_pipeline_embeds(
-    pipeline,
+    pipe,
     prompt,
     negative_prompt,
     device,
@@ -3645,28 +3643,55 @@ def get_pipeline_embeds(
     token_length_of_negative_prompt_text
 ):
 
-    max_length = pipeline.tokenizer.model_max_length
+    max_length = pipe.tokenizer.model_max_length
 
-    # create the tensor based on which prompt is longer
+    # Create the tensor based on which prompt is longer
 
     if token_length_of_prompt_text >= token_length_of_negative_prompt_text:
 
-        input_ids = pipeline.tokenizer(prompt, return_tensors="pt", truncation=False).input_ids.to(device)
+        input_ids = pipe.tokenizer(
+            prompt,
+            return_tensors = "pt",
+            truncation = False
+        ).input_ids.to(device)
+
         shape_max_length = input_ids.shape[-1]
-        negative_ids = pipeline.tokenizer(negative_prompt, truncation=False, padding="max_length",
-            max_length=shape_max_length, return_tensors="pt").input_ids.to(device)
+
+        negative_ids = pipe.tokenizer(
+            negative_prompt,
+            truncation = False,
+            padding = "max_length",
+            max_length = shape_max_length,
+            return_tensors = "pt"
+        ).input_ids.to(device)
 
     else:
-        negative_ids = pipeline.tokenizer(negative_prompt, return_tensors="pt", truncation=False).input_ids.to(device)
+
+        negative_ids = pipe.tokenizer(
+            negative_prompt,
+            return_tensors = "pt",
+            truncation = False
+        ).input_ids.to(device)
+
         shape_max_length = negative_ids.shape[-1]
-        input_ids = pipeline.tokenizer(prompt, return_tensors="pt", truncation=False, padding="max_length",
-            max_length=shape_max_length).input_ids.to(device)
+
+        input_ids = pipe.tokenizer(
+            prompt,
+            return_tensors="pt",
+            truncation = False,
+            padding = "max_length",
+            max_length = shape_max_length
+        ).input_ids.to(device)
 
     concat_embeds = []
+
     neg_embeds = []
+
     for i in range(0, shape_max_length, max_length):
-        concat_embeds.append(pipeline.text_encoder(input_ids[:, i: i + max_length])[0])
-        neg_embeds.append(pipeline.text_encoder(negative_ids[:, i: i + max_length])[0])
+
+        concat_embeds.append(pipe.text_encoder(input_ids[:, i: i + max_length])[0])
+
+        neg_embeds.append(pipe.text_encoder(negative_ids[:, i: i + max_length])[0])
 
     return torch.cat(concat_embeds, dim=1), torch.cat(neg_embeds, dim=1)
 
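For orientation, a minimal usage sketch (not part of the commit): get_pipeline_embeds splits prompts longer than the 77-token CLIP window into 77-token chunks, encodes each chunk, and concatenates the embeddings, and a diffusers Stable Diffusion 1.5 pipeline can then consume those embeddings through prompt_embeds / negative_prompt_embeds instead of text prompts. The model id, prompts, device choice, and step count below are placeholder assumptions, not values taken from spaghetti_ai_script.py.

# Sketch only, not part of this commit. Model id, prompts, and settings are placeholders.
# get_pipeline_embeds is the function defined above in this file.
import torch
from diffusers import StableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(device)

prompt = "a highly detailed painting of a castle on a cliff at sunset " * 10   # well past the 77-token window
negative_prompt = "blurry, low quality"

# Token counts are measured the same way the script does further down in this diff.
token_length_of_prompt_text = len(pipe.tokenizer.tokenize(prompt))
token_length_of_negative_prompt_text = len(pipe.tokenizer.tokenize(negative_prompt))

prompt_embeds, negative_prompt_embeds = get_pipeline_embeds(
    pipe,
    prompt,
    negative_prompt,
    device,
    token_length_of_prompt_text,
    token_length_of_negative_prompt_text
)

# When embeddings are supplied, the plain-text prompts are omitted.
image = pipe(
    prompt_embeds = prompt_embeds,
    negative_prompt_embeds = negative_prompt_embeds,
    num_inference_steps = 25
).images[0]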
@@ -4042,26 +4067,30 @@ def create_image_function (
 
 
 
 
-    tokenizer = pipe.tokenizer
-
-    max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens
-
-    token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text))
-    token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text))
+    tokenizer = pipe.tokenizer
+
+    max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens
+
+    token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text))
+    token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text))
+
+    both_prompts_under_length_limit = 0
+
+    if (
+        (token_length_of_prompt_text <= max_token_length_of_model) and
+        (token_length_of_negative_prompt_text <= max_token_length_of_model)
+    ):
+
+        # No need to handle a long prompt
+
+        both_prompts_under_length_limit = 1
+
+        allow_longer_prompts_for_sd_1_5_based_models_field_value = 0
 
     if (
-        (
-            (token_length_of_prompt_text > max_token_length_of_model) or
-            (token_length_of_negative_prompt_text > max_token_length_of_model)
-        ) and
+        (both_prompts_under_length_limit == 0) and
         (allow_longer_prompts_for_sd_1_5_based_models_field_value == 1)
     ):
 
@@ -4083,7 +4112,10 @@ def create_image_function (
 
     # Only 77 tokens are allowed in the prompt. 2 are reserved, meaning
     # it is truncated to 75. This happens automatically, but we want to
-    # tell people that
+    # tell people that.
+    #
+    # Also applies when both prompts are under the maximum and they do
+    # allow longer prompts too.
 
     prompt_embeds = None
     negative_prompt_embeds = None
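To make the length check above concrete: the SD 1.5 CLIP tokenizer reports a model_max_length of 77, two slots of which go to the begin-of-text and end-of-text special tokens, which is why the comment above speaks of 75 usable tokens. A standalone sketch, assuming number_of_reserved_tokens is 2 and using a placeholder tokenizer checkpoint and prompt:

# Sketch only, not part of this commit. Checkpoint and prompt text are placeholders.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder = "tokenizer")

number_of_reserved_tokens = 2   # begin-of-text and end-of-text special tokens
max_token_length_of_model = tokenizer.model_max_length - number_of_reserved_tokens   # 77 - 2 = 75

prompt_text = "a photograph of an astronaut riding a horse"

# tokenize() does not add the special tokens, matching how the script counts prompt length.
token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text))

needs_long_prompt_handling = token_length_of_prompt_text > max_token_length_of_model
print(token_length_of_prompt_text, max_token_length_of_model, needs_long_prompt_handling)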
@@ -6825,7 +6857,7 @@ with gr.Blocks(
     model_types_for_longer_prompts_html += " models"
 
     allow_longer_prompts_for_sd_1_5_based_models_field = gr.Checkbox(
-        label = "Allow longer prompts for " + model_types_for_longer_prompts_html + " models when not using the refiner or upscaler. (will sometimes fail)",
+        label = "Allow longer prompts for " + model_types_for_longer_prompts_html + " models when not using the refiner or upscaler.",
         value = default_allow_longer_prompts_for_sd_1_5_based_models_is_selected,
         interactive = True,
         container = True,
 