magicfixeseverything committed
Commit 627d7ad
1 Parent(s): e5362c6

Upload spaghetti_ai_script.py

Files changed (1)
  1. spaghetti_ai_script.py +38 -25
spaghetti_ai_script.py CHANGED
@@ -442,6 +442,8 @@ default_do_not_create_upscaling_preview_images = 0 # 1
 #
 # Allow Longer Prompts for Stable Diffusion 1.5 Based Models
 #
+# https://github.com/huggingface/diffusers/issues/2136#issuecomment-1514969011
+#
 
 enable_longer_prompts = 1
 
@@ -3630,30 +3632,25 @@ def create_preview_image (
 # This is used to get longer prompts for Stable Diffusion 1.5 based
 # models.
 #
+# https://github.com/huggingface/diffusers/issues/2136#issuecomment-1514969011
+#
 #####################
 
 def get_pipeline_embeds(
     pipeline,
     prompt,
     negative_prompt,
-    device
+    device,
+    token_length_of_prompt_text,
+    token_length_of_negative_prompt_text
 ):
 
-    """ Get pipeline embeds for prompts bigger than the maxlength of the pipe
-    :param pipeline:
-    :param prompt:
-    :param negative_prompt:
-    :param device:
-    :return:
-    """
     max_length = pipeline.tokenizer.model_max_length
 
-    # simple way to determine length of tokens
-    count_prompt = len(prompt.split(" "))
-    count_negative_prompt = len(negative_prompt.split(" "))
-
     # create the tensor based on which prompt is longer
-    if count_prompt >= count_negative_prompt:
+
+    if token_length_of_prompt_text >= token_length_of_negative_prompt_text:
+
         input_ids = pipeline.tokenizer(prompt, return_tensors="pt", truncation=False).input_ids.to(device)
         shape_max_length = input_ids.shape[-1]
         negative_ids = pipeline.tokenizer(negative_prompt, truncation=False, padding="max_length",
@@ -4043,7 +4040,30 @@ def create_image_function (
     prompt_text_not_used_substring = ""
     negative_prompt_text_not_used_substring = ""
 
-    if allow_longer_prompts_for_sd_1_5_based_models_field_value == 1:
+
+
+
+
+
+
+
+
+
+
+    tokenizer = pipe.tokenizer
+
+    max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens
+
+    token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text))
+    token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text))
+
+    if (
+        (
+            (token_length_of_prompt_text > max_token_length_of_model) or
+            (token_length_of_negative_prompt_text > max_token_length_of_model)
+        ) and
+        (allow_longer_prompts_for_sd_1_5_based_models_field_value == 1)
+    ):
 
         # Longer prompts are allowed
 
@@ -4051,7 +4071,9 @@ def create_image_function (
             pipe,
             prompt_text,
             negative_prompt_text,
-            device
+            device,
+            token_length_of_prompt_text,
+            token_length_of_negative_prompt_text
         )
 
         prompt_text_to_use_inside_pipeline = None
@@ -4066,15 +4088,6 @@ def create_image_function (
     prompt_embeds = None
    negative_prompt_embeds = None
 
-    tokenizer = pipe.tokenizer
-
-    max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens
-
-    token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text))
-    token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text))
-
-
-
     truncated_prompts = 0
 
     partial_prompt_or_negative_prompt_length_too_long_message = ""
@@ -6812,7 +6825,7 @@ with gr.Blocks(
     model_types_for_longer_prompts_html += " models"
 
     allow_longer_prompts_for_sd_1_5_based_models_field = gr.Checkbox(
-        label = "Allow longer prompts for " + model_types_for_longer_prompts_html + " models when not using the refiner or upscaler",
+        label = "Allow longer prompts for " + model_types_for_longer_prompts_html + " models when not using the refiner or upscaler. (will sometimes fail)",
         value = default_allow_longer_prompts_for_sd_1_5_based_models_is_selected,
         interactive = True,
         container = True,
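
The hunks above show only the top of get_pipeline_embeds; the rest of the function body falls outside the diff context. For orientation, here is a minimal sketch of how the function plausibly continues, based on the approach described in the linked diffusers issue (huggingface/diffusers#2136) and adapted to the new signature: tokenize both prompts without truncation, pad the shorter one to the length of the longer one, then run the text encoder over windows of model_max_length tokens and concatenate the results. The else branch and the chunking loop are assumptions taken from that issue, not lines from this commit.

import torch

def get_pipeline_embeds(
    pipeline,
    prompt,
    negative_prompt,
    device,
    token_length_of_prompt_text,
    token_length_of_negative_prompt_text
):

    # Maximum tokens the text encoder accepts per forward pass
    # (77 for the CLIP encoder used by Stable Diffusion 1.5).
    max_length = pipeline.tokenizer.model_max_length

    # Tokenize without truncation and pad the shorter prompt so both
    # tensors end up with the same sequence length.
    if token_length_of_prompt_text >= token_length_of_negative_prompt_text:

        input_ids = pipeline.tokenizer(prompt, return_tensors="pt", truncation=False).input_ids.to(device)
        shape_max_length = input_ids.shape[-1]
        negative_ids = pipeline.tokenizer(negative_prompt, truncation=False, padding="max_length",
                                          max_length=shape_max_length, return_tensors="pt").input_ids.to(device)

    else:

        negative_ids = pipeline.tokenizer(negative_prompt, return_tensors="pt", truncation=False).input_ids.to(device)
        shape_max_length = negative_ids.shape[-1]
        input_ids = pipeline.tokenizer(prompt, return_tensors="pt", truncation=False, padding="max_length",
                                       max_length=shape_max_length).input_ids.to(device)

    # Encode the ids in max_length-sized windows and concatenate the
    # embeddings along the sequence dimension. The last window can be
    # shorter than max_length, which is likely why the checkbox label
    # warns that longer prompts "will sometimes fail".
    concat_embeds = []
    neg_embeds = []
    for i in range(0, shape_max_length, max_length):
        concat_embeds.append(pipeline.text_encoder(input_ids[:, i: i + max_length])[0])
        neg_embeds.append(pipeline.text_encoder(negative_ids[:, i: i + max_length])[0])

    return torch.cat(concat_embeds, dim=1), torch.cat(neg_embeds, dim=1)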
 
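A hedged usage sketch, not taken from the commit: once prompt_embeds and negative_prompt_embeds are built, they typically replace the raw prompt and negative_prompt strings in the diffusers pipeline call. The model id, prompt strings, step count, and guidance scale below are illustrative placeholders; the script wires these through its own variables.

import torch
from diffusers import StableDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Illustrative SD 1.5 checkpoint; the script selects its own model.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(device)

prompt_text = "an extremely detailed prompt that runs well past the 77 token limit ..."
negative_prompt_text = "a long negative prompt ..."

tokenizer = pipe.tokenizer
token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text))
token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text))

prompt_embeds, negative_prompt_embeds = get_pipeline_embeds(
    pipe,
    prompt_text,
    negative_prompt_text,
    device,
    token_length_of_prompt_text,
    token_length_of_negative_prompt_text
)

image = pipe(
    prompt_embeds = prompt_embeds,
    negative_prompt_embeds = negative_prompt_embeds,
    num_inference_steps = 25,
    guidance_scale = 7.5
).images[0]

image.save("longer_prompt_test.png")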