magicfixeseverything committed on
Commit
e426efa
1 Parent(s): 90498d5

Upload 4 files

spaghetti_ai.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6010b8e71b9f3c0f1a1a6ce1694f1c242f25e9e9fbd4e1955d187cd8211b9277
3
- size 86416
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:099991965df4631d1c4eb0423fa9bf477e582e237926bad9bf7d8fe117205d45
3
+ size 88891
spaghetti_ai_launcher.bat CHANGED
@@ -27,8 +27,13 @@ REM don't need an updated version. The older versions will not be deleted
27
  REM automatically. That means your drive would eventually run out of
28
  REM space if you never deleted older versions.
29
  REM
30
 
31
- set HF_HUB_OFFLINE=0
32
 
33
  REM
34
  REM HF_HUB_DISABLE_TELEMETRY
@@ -38,8 +43,15 @@ REM
38
  REM About:
39
  REM https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables#hfhubdisabletelemetry
40
  REM
41
 
42
  set HF_HUB_DISABLE_TELEMETRY=1
 
43
 
44
  REM ############################################################################
45
 
@@ -47,6 +59,6 @@ cd C:/Spaghetti_AI
47
 
48
  call .venv/Scripts/activate.bat
49
 
50
- call py .venv/app_files/spaghetti_ai_script.py
51
 
52
  cmd /k
 
27
  REM automatically. That means your drive would eventually run out of
28
  REM space if you never deleted older versions.
29
  REM
30
+ REM To have model data download, this variable must be set to 0 if using
31
+ REM this script. You must also set "only_use_local_files" to "0" in
32
+ REM "spaghetti_ai_script.py". If either is not set that way, model data
33
+ REM will not download.
34
+ REM
35
 
36
+ set HF_HUB_OFFLINE=1
37
 
38
  REM
39
  REM HF_HUB_DISABLE_TELEMETRY
 
43
  REM About:
44
  REM https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables#hfhubdisabletelemetry
45
  REM
46
+ REM DO_NOT_TRACK
47
+ REM
48
+ REM This one might disable more telemetry than the variable above.
49
+ REM
50
+ REM About:
51
+ REM https://github.com/huggingface/huggingface_hub/pull/1920
52
 
53
  set HF_HUB_DISABLE_TELEMETRY=1
54
+ set DO_NOT_TRACK=1
55
 
56
  REM ############################################################################
57
 
 
59
 
60
  call .venv/Scripts/activate.bat
61
 
62
+ py .venv/app_files/spaghetti_ai_script.py
63
 
64
  cmd /k
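The launcher only exports these variables; they take effect when huggingface_hub reads them inside the Python process. A minimal sketch, not part of this commit, of how the script side could confirm the environment the .bat file sets up (the "1" values checked here are the ones exported above):

import os

# the .bat file exports these before calling the script, so they are visible here
hub_is_offline = os.environ.get("HF_HUB_OFFLINE", "0") == "1"
telemetry_disabled = (
    os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "1"
    or os.environ.get("DO_NOT_TRACK", "0") == "1"
)

print("Hub offline:", hub_is_offline)
print("Telemetry disabled:", telemetry_disabled)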
spaghetti_ai_script.py CHANGED
@@ -6,6 +6,7 @@ from diffusers import DiffusionPipeline
6
  import os
7
  import random
8
  import torchsde
 
9
 
10
  ##########
11
 
@@ -65,6 +66,71 @@ main_dir = "C:/Spaghetti_AI"
65
 
66
  ####################
67
 
68
  #
69
  # Use Custom Hugging Face Cache Directory
70
  #
@@ -156,7 +222,7 @@ saved_images_folder_name = "saved_images"
156
  # Auto Open Browser From Command Prompt
157
  #
158
 
159
- auto_open_browser = 1
160
 
161
  ####################
162
 
@@ -250,7 +316,7 @@ show_messages_in_modal_on_page = 0
250
  # Suppress Hugging Face Hub Offline Status
251
  #
252
  # By default, we add messages about the current setting of
253
- # "HF_HUB_OFFLINE" in
254
  #
255
 
256
  suppress_hugging_face_hub_offline_status = 0
@@ -467,6 +533,13 @@ photoreal_online_configurations_object = {
467
 
468
  ####################
469
 
470
  hugging_face_refiner_partial_path = "stabilityai/stable-diffusion-xl-refiner-1.0"
471
  hugging_face_upscaler_partial_path = "stabilityai/sd-x2-latent-upscaler"
472
 
@@ -668,8 +741,8 @@ default_negative_prompt = ""
668
  default_width = 768
669
  default_height = 768
670
 
671
- minimum_width = 256
672
- minimum_height = 256
673
 
674
  maximum_width = 2048 # 1024
675
  maximum_height = 2048 # 1024
@@ -874,12 +947,17 @@ if script_being_run_on_hugging_face == 1:
874
  show_messages_in_modal_on_page = 0
875
  show_messages_in_command_prompt = 1
876
 
 
 
877
  if device == "cpu":
878
 
879
  # If on CPU at HuggingFace, I reduce what is available.
880
 
881
  show_image_creation_progress_log = 1
882
 
883
  maximum_width = 768
884
  maximum_height = 768
885
 
@@ -940,6 +1018,14 @@ if default_upscaler_steps > maximum_upscaler_steps:
940
 
941
 
942
 
943
  if allow_online_configurations == 0:
944
 
945
  base_model_array = [
@@ -979,6 +1065,8 @@ if script_being_run_on_hugging_face == 0:
979
 
980
  hugging_face_hub_is_offline = 1
981
 
 
 
982
  if suppress_hugging_face_hub_offline_status == 1:
983
 
984
  if hugging_face_hub_is_offline == 0:
@@ -1312,6 +1400,14 @@ if default_base_model == "sdxl_turbo":
1312
 
1313
 
1314
 
1315
  default_add_seed_into_pipe_field_row_visibility = False
1316
 
1317
  if is_default_config == 1:
@@ -1386,6 +1482,50 @@ if maximum_seed <= 9007199254740992:
1386
 
1387
 
1388
 
1389
  #####################
1390
  #
1391
  # Show Message
@@ -1989,7 +2129,8 @@ def construct_pipe (
1989
  base_model_kwargs["cache_dir"] = hugging_face_cache_dir
1990
 
1991
  pipe = DiffusionPipeline.from_pretrained(
1992
- model_configuration_links_object[model_configuration_name_value],
 
1993
  **base_model_kwargs
1994
  )
1995
 
@@ -2235,7 +2376,8 @@ def construct_refiner ():
2235
  refiner_kwargs["cache_dir"] = hugging_face_cache_dir
2236
 
2237
  refiner = DiffusionPipeline.from_pretrained(
2238
- hugging_face_refiner_partial_path,
 
2239
  **refiner_kwargs
2240
  )
2241
 
@@ -2299,7 +2441,8 @@ def construct_upscaler ():
2299
  upscaler_kwargs["cache_dir"] = hugging_face_cache_dir
2300
 
2301
  upscaler = DiffusionPipeline.from_pretrained(
2302
- hugging_face_upscaler_partial_path,
 
2303
  **upscaler_kwargs
2304
  )
2305
 
@@ -2500,8 +2643,8 @@ def create_image_function (
2500
  base_model_num_inference_steps = int(base_model_num_inference_steps)
2501
  base_model_steps_field_for_sdxl_turbo = int(base_model_steps_field_for_sdxl_turbo)
2502
  actual_seed = int(actual_seed)
2503
- refining_denoise_start_for_default_config_field_value = round(float(refining_denoise_start_for_default_config_field_value), 2)
2504
- refining_denoise_start_for_online_config_field_value = round(float(refining_denoise_start_for_online_config_field_value), 2)
2505
  refining_steps_for_sdxl_online_config_field_value = int(refining_steps_for_sdxl_online_config_field_value)
2506
  upscaling_num_inference_steps = int(upscaling_num_inference_steps)
2507
 
@@ -2528,9 +2671,19 @@ def create_image_function (
2528
 
2529
 
2530
 
2531
- current_actual_total_base_model_steps = base_model_num_inference_steps
 
2532
  current_actual_total_refiner_steps = 0
2533
- current_actual_total_upscaler_steps = upscaling_num_inference_steps
2534
 
2535
 
2536
 
@@ -2538,7 +2691,7 @@ def create_image_function (
2538
 
2539
  negative_prompt_text = ""
2540
  base_model_num_inference_steps = base_model_steps_field_for_sdxl_turbo
2541
- current_actual_total_base_model_steps = base_model_num_inference_steps
2542
  guidance_scale = 0
2543
 
2544
 
@@ -2596,6 +2749,7 @@ def create_image_function (
2596
  refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value)
2597
 
2598
 
 
2599
  refining_use_denoising_start_in_base_model_when_using_refiner_field_value = numerical_bool(refining_use_denoising_start_in_base_model_when_using_refiner_field_value)
2600
  refining_base_model_output_to_refiner_is_in_latent_space_field_value = numerical_bool(refining_base_model_output_to_refiner_is_in_latent_space_field_value)
2601
 
@@ -2844,6 +2998,10 @@ def create_image_function (
2844
 
2845
 
2846
 
2847
  if show_image_creation_progress_log == 1:
2848
 
2849
 
@@ -2897,10 +3055,14 @@ def create_image_function (
2897
 
2898
  base_model_progress_text = "Base model processing started"
2899
 
2900
  progress(
2901
  progress = (
2902
  callback_step_index,
2903
- current_actual_total_base_model_steps
2904
  ),
2905
  desc = base_model_progress_text,
2906
  unit = "base model steps"
@@ -2961,10 +3123,14 @@ def create_image_function (
2961
 
2962
  refiner_progress_text = "Refiner processing started"
2963
 
2964
  progress(
2965
  progress = (
2966
  callback_step_index,
2967
- current_actual_total_refiner_steps
2968
  ),
2969
  desc = refiner_progress_text,
2970
  unit = "refiner steps"
@@ -3024,10 +3190,14 @@ def create_image_function (
3024
 
3025
  upscaler_progress_text = "Upscaler processing started"
3026
 
3027
  progress(
3028
  progress = (
3029
  callback_step_index,
3030
- current_actual_total_upscaler_steps
3031
  ),
3032
  desc = upscaler_progress_text,
3033
  unit = "upscaler steps"
@@ -3098,7 +3268,7 @@ def create_image_function (
3098
 
3099
  upscaling_num_inference_steps = 5
3100
 
3101
- current_actual_total_upscaler_steps = upscaling_num_inference_steps
3102
 
3103
  if show_messages_in_command_prompt == 1:
3104
 
@@ -3138,7 +3308,11 @@ def create_image_function (
3138
  desc = "Refining is beginning"
3139
  )
3140
 
3141
- current_actual_total_refiner_steps = (refining_steps_for_sdxl_online_config_field_value - round(refining_steps_for_sdxl_online_config_field_value * refining_denoise_start_for_online_config_field_value))
3142
 
3143
  nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value)
3144
 
@@ -3146,7 +3320,7 @@ def create_image_function (
3146
  "Refiner? Yes",
3147
  "Refiner denoise start %: " + nice_refiner_denoise_start,
3148
  "Refiner number of iterations: " + str(refining_steps_for_sdxl_online_config_field_value),
3149
- "Actual Refining Steps: " + str(current_actual_total_refiner_steps)
3150
  ]
3151
 
3152
  image = refiner(
@@ -3250,14 +3424,11 @@ def create_image_function (
3250
 
3251
  default_steps_in_diffusers = 50
3252
 
3253
- current_actual_total_refiner_steps = (default_steps_in_diffusers - round(default_steps_in_diffusers * refining_denoise_start_for_online_config_field_value))
3254
 
3255
- refiner_info_for_info_about_prompt_lines_array = [
3256
- "Refiner? Yes",
3257
- "Refiner denoise start %: " + nice_refiner_denoise_start,
3258
- "Refiner number of iterations: " + str(default_steps_in_diffusers),
3259
- "Actual Refining Steps: " + str(current_actual_total_refiner_steps)
3260
- ]
3261
 
3262
  image = refiner(
3263
  Prompt,
@@ -3268,6 +3439,15 @@ def create_image_function (
3268
  callback_on_step_end=callback_to_do_for_refiner_progress
3269
  ).images[0]
3270
 
3271
  else:
3272
 
3273
  if show_messages_in_command_prompt == 1:
@@ -3306,6 +3486,12 @@ def create_image_function (
3306
 
3307
 
3308
 
3309
  #
3310
  #
3311
  #
@@ -3316,11 +3502,18 @@ def create_image_function (
3316
 
3317
  if use_refiner == 1:
3318
 
3319
- if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1:
3320
 
3321
  denoising_end = refining_denoise_start_for_default_config_field_value
3322
 
3323
- current_actual_total_base_model_steps = round(base_model_num_inference_steps * refining_denoise_start_for_default_config_field_value)
3324
 
3325
  else:
3326
 
@@ -3332,44 +3525,11 @@ def create_image_function (
3332
 
3333
  output_type_before_refiner = "latent"
3334
 
3335
- current_actual_total_refiner_steps = (base_model_num_inference_steps - round(base_model_num_inference_steps * refining_denoise_start_for_default_config_field_value))
3336
 
3337
- refiner_info_for_info_about_prompt_lines_array = [
3338
- "Refiner? Yes"
3339
- ]
3340
-
3341
- nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value)
3342
 
3343
- if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1:
3344
-
3345
- refiner_info_for_info_about_prompt_lines_array.extend([
3346
- "Set \"denoising_end\" in base model generation? Yes",
3347
- "Base model denoise end %: " + nice_refiner_denoise_start,
3348
- "Actual Base Model Steps: " + str(current_actual_total_base_model_steps)
3349
- ])
3350
-
3351
- else:
3352
-
3353
- refiner_info_for_info_about_prompt_lines_array.extend([
3354
- "Set \"denoising_end\" in base model generation? No",
3355
- ])
3356
-
3357
- refiner_info_for_info_about_prompt_lines_array.extend([
3358
- "Refiner denoise start %: " + nice_refiner_denoise_start,
3359
- "Actual Refining Steps: " + str(current_actual_total_refiner_steps)
3360
- ])
3361
-
3362
- if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1:
3363
-
3364
- refiner_info_for_info_about_prompt_lines_array.extend([
3365
- "Base model output in latent space before refining? Yes",
3366
- ])
3367
-
3368
- else:
3369
-
3370
- refiner_info_for_info_about_prompt_lines_array.extend([
3371
- "Base model output in latent space before refining? No",
3372
- ])
3373
 
3374
  if use_upscaler == 1:
3375
 
@@ -3499,6 +3659,49 @@ def create_image_function (
3499
 
3500
  image_to_return = refined_image
3501
 
3502
  else:
3503
 
3504
  if use_upscaler == 1:
@@ -3755,7 +3958,10 @@ def create_image_function (
3755
 
3756
  info_about_prompt = "\n".join(info_about_prompt_lines_array)
3757
 
3758
- output_text_field_update = info_about_prompt
3759
 
3760
 
3761
 
@@ -3821,7 +4027,7 @@ def create_image_function (
3821
  saved_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt"
3822
 
3823
  prompt_info_file_handle = open(saved_text_file_path_and_file, "w")
3824
- prompt_info_file_handle.writelines(output_text_field_update)
3825
  prompt_info_file_handle.close()
3826
 
3827
 
@@ -3831,7 +4037,7 @@ def create_image_function (
3831
  )
3832
 
3833
  image_gallery_array_state_value.insert(0, image_to_return)
3834
- prompt_information_array_state_value.insert(0, output_text_field_update)
3835
 
3836
  output_image_gallery_field_update = gr.Gallery(
3837
  value = image_gallery_array_state_value,
@@ -3913,17 +4119,7 @@ def cancel_image_processing():
3913
  # Download Data From HuggingFace
3914
  #
3915
  # This will download a lot of data at once rather than waiting until you
3916
- # use each model. This is access by having this at the end of the URL:
3917
- #
3918
- # ?download_data=1
3919
- #
3920
- # Like this:
3921
- # http://127.0.0.1:7860/?download_data=1
3922
- #
3923
- # A "1" will download only the default model configuration for each main
3924
- # model, as well as refiner and upscaler data. A "2" will download all
3925
- # model data needed in "model_configuration_links_object", including
3926
- # online configurations you may not want.
3927
  #
3928
  #####################
3929
 
@@ -4463,8 +4659,56 @@ def set_base_model_and_model_configuration_from_query_params(
4463
  # Hide border when yield is used:
4464
  # https://github.com/gradio-app/gradio/issues/5479
4465
  # .generating {border: none !important;}
4466
 
4467
- css_to_use = "footer{display:none !important}"
4468
 
4469
  with gr.Blocks(
4470
  title = "Spaghetti AI",
@@ -4581,7 +4825,10 @@ with gr.Blocks(
4581
  step = refiner_denoise_start_input_slider_steps
4582
  )
4583
 
4584
- with gr.Row():
4585
 
4586
  refining_use_denoising_start_in_base_model_when_using_refiner_field = gr.Checkbox(
4587
  label = "Use \"denoising_start\" value as \"denoising_end\" value in base model generation when using refiner",
@@ -4989,11 +5236,14 @@ with gr.Blocks(
4989
  with gr.Row():
4990
 
4991
  output_text_field = gr.Textbox(
 
4992
  label = "Prompt Information:",
4993
  value = "After an image is generated, its generation information will appear here. All of this information is also embedded in the image itself. If you open the image in a text program, it will appear at the top." + additional_prompt_info_html,
4994
  show_copy_button = True,
4995
- lines = 10,
4996
- max_lines = 20,
 
 
4997
  container = True
4998
  )
4999
 
@@ -5174,9 +5424,10 @@ async (
5174
  var onlineConfigurationsObject = {3};
5175
  var modelConfigurationForceRefinerObject = {4};
5176
  var modelConfigurationIncludeRefinerNumberOfStepsObject = {5};
5177
- var modelConfigurationHideUpscalerStepsObject = {6};
 
5178
 
5179
- var allowOnlineConfigurations = {7};
5180
 
5181
  var baseModelFullNamesToBaseModelIdConversion = {{}};
5182
  Object.keys(baseModelNamesObject).forEach(key => {{
@@ -5314,11 +5565,19 @@ async (
5314
 
5315
  }}
5316
 
5317
- var refiningNumberOfIterationsForOnlineConfigFieldDisplay = "none";
5318
 
5319
  if (Object.keys(modelConfigurationIncludeRefinerNumberOfStepsObject).includes(modelConfigurationNameValue)) {{
5320
 
5321
- refiningNumberOfIterationsForOnlineConfigFieldDisplay = "block";
5322
 
5323
  }}
5324
 
@@ -5333,7 +5592,8 @@ async (
5333
  document.getElementById("refining_selection_automatically_selected_message_field_id").style.display = refiningSelectionAutomaticallySelectedMessageFieldDisplay;
5334
  document.getElementById("refining_selection_online_config_normal_field_id").style.display = refiningSelectionOnlineConfigNormalFieldDisplay;
5335
  document.getElementById("refining_selection_online_config_automatically_selected_field_id").style.display = refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay;
5336
- document.getElementById("refining_steps_for_sdxl_online_config_field_row_id").style.display = refiningNumberOfIterationsForOnlineConfigFieldDisplay;
 
5337
  document.getElementById("upscaling_num_inference_steps_field_row_id").style.display = upscalingNumInferenceStepsFieldDisplay;
5338
 
5339
  }}
@@ -5347,6 +5607,7 @@ async (
5347
  online_configurations_object,
5348
  model_configuration_force_refiner_object,
5349
  model_configuration_include_refiner_number_of_steps_object,
 
5350
  model_configuration_hide_upscaler_steps_object,
5351
  allow_online_configurations
5352
  )
@@ -5607,6 +5868,8 @@ async () => {{
5607
 
5608
  window.modelConfigurationDropdownFieldValuesObject = {{{0}}};
5609
 
 
 
5610
  }}
5611
 
5612
  """.format(
 
6
  import os
7
  import random
8
  import torchsde
9
+ from math import floor, copysign
10
 
11
  ##########
12
 
 
66
 
67
  ####################
68
 
69
+ #
70
+ # Only Use Local Files (IMPORTANT)
71
+ #
72
+ # Please read the section below the configuration. The app will not work
73
+ # until model data has been downloaded, which requires this to be set to "0".
74
+ #
75
+ # 0 False (Model data will be downloaded)
76
+ # 1 True (Model data will not be downloaded, even if missing)
77
+ #
78
+
79
+ only_use_local_files = 0
80
+
81
+ #
82
+ # This is an important value. HuggingFace doesn't just download a model
83
+ # once and never try again. If something is updated, it will download it
84
+ # again. It will not delete the older version. This could eventually
85
+ # fill your drive, since each model update could add another 5 to 15
86
+ # gigabytes of data. This variable
87
+ # forces the script not to check for model data. However, when set to
88
+ # "1", the app will not download any data for the models, including for
89
+ # refining and upscaling. That would mean the app will not work. You need
90
+ # to set this to "0" when you need to download data. There are ways to
91
+ # download less data, but they are not as simple, and I prefer this
92
+ # method. You could check occasionally and delete older model data
93
+ # manually, but if you're like me, you would forget.
94
+ #
95
+ # To download data, you can use each model in the app while this is set
96
+ # to "0"; the needed data is downloaded as each model is used. Then you
97
+ # could set this back to "1". However, if you want
98
+ # to download all the data, without having to use each model, including
99
+ # using refining and upscaling at some point at least once, there are
100
+ # two options.
101
+ #
102
+ # To download all default model data, meaning the default model selected
103
+ # for each base model (set in
104
+ # "base_model_model_configuration_defaults_object"), including data for
105
+ # the refiner and upscaler, add this to the end of the url for option 1:
106
+ #
107
+ # ?download_data=1
108
+ #
109
+ # Like this for option 1:
110
+ # http://127.0.0.1:7860/?download_data=1
111
+ #
112
+ # To download all model data, for everything in the
113
+ # "model_configuration_links_object", including data for the refiner and
114
+ # upscaler, add this to the end of the url for option 2:
115
+ #
116
+ # ?download_data=2
117
+ #
118
+ # Both options will download dozens of gigabytes of data, most especially
119
+ # the second option, so you may not want to do that. Before you do that,
120
+ # make sure you have removed from the configurations the models you do
121
+ # not want. For option 1, remove the base models you do not want to use
122
+ # in "base_model_array". For option 2, remove the model configurations
123
+ # you do not want to use in
124
+ # "base_model_object_of_model_configuration_arrays".
125
+ #
126
+ # To have model data download, this variable must be set to 0. You must
127
+ # also set "HF_HUB_OFFLINE" to "0" in "spaghetti_ai_launcher.bat" if you
128
+ # use that script. If either value is not set that way, model data will
129
+ # not download.
130
+ #
131
+
132
+ ####################
133
+
134
  #
135
  # Use Custom Hugging Face Cache Directory
136
  #
 
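A condensed sketch of how the only_use_local_files flag above is consumed later in this commit: the 0/1 value becomes a bool and is passed to diffusers as local_files_only, so a value of 1 makes from_pretrained() use only the local cache and fail instead of downloading (the refiner path is the one already defined in this script):

from diffusers import DiffusionPipeline

only_use_local_files = 0
only_use_local_files_bool = (only_use_local_files == 1)

# with local_files_only = True, missing files raise an error instead of
# triggering a download from the Hub
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    local_files_only = only_use_local_files_bool,
)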
222
  # Auto Open Browser From Command Prompt
223
  #
224
 
225
+ auto_open_browser = 0
226
 
227
  ####################
228
 
 
316
  # Suppress Hugging Face Hub Offline Status
317
  #
318
  # By default, we add messages about the current setting of
319
+ # "HF_HUB_OFFLINE".
320
  #
321
 
322
  suppress_hugging_face_hub_offline_status = 0
 
533
 
534
  ####################
535
 
536
+ base_models_not_supporting_denoising_end_for_base_model_object = {
537
+ "photoreal": 1,
538
+ "sd_1_5_runwayml": 1
539
+ }
540
+
541
+ ####################
542
+
543
  hugging_face_refiner_partial_path = "stabilityai/stable-diffusion-xl-refiner-1.0"
544
  hugging_face_upscaler_partial_path = "stabilityai/sd-x2-latent-upscaler"
545
 
 
741
  default_width = 768
742
  default_height = 768
743
 
744
+ minimum_width = 64
745
+ minimum_height = 64
746
 
747
  maximum_width = 2048 # 1024
748
  maximum_height = 2048 # 1024
 
947
  show_messages_in_modal_on_page = 0
948
  show_messages_in_command_prompt = 1
949
 
950
+ only_use_local_files = 0
951
+
952
  if device == "cpu":
953
 
954
  # If on CPU at HuggingFace, I reduce what is available.
955
 
956
  show_image_creation_progress_log = 1
957
 
958
+ minimum_width = 256
959
+ minimum_height = 256
960
+
961
  maximum_width = 768
962
  maximum_height = 768
963
 
 
1018
 
1019
 
1020
 
1021
+ only_use_local_files_bool = False
1022
+
1023
+ if only_use_local_files == 1:
1024
+
1025
+ only_use_local_files_bool = True
1026
+
1027
+
1028
+
1029
  if allow_online_configurations == 0:
1030
 
1031
  base_model_array = [
 
1065
 
1066
  hugging_face_hub_is_offline = 1
1067
 
1068
+ only_use_local_files = 1
1069
+
1070
  if suppress_hugging_face_hub_offline_status == 1:
1071
 
1072
  if hugging_face_hub_is_offline == 0:
 
1400
 
1401
 
1402
 
1403
+ default_refining_use_denoising_start_in_base_model_when_using_refiner_field_row_visibility = True
1404
+
1405
+ if default_base_model in base_models_not_supporting_denoising_end_for_base_model_object:
1406
+
1407
+ default_refining_use_denoising_start_in_base_model_when_using_refiner_field_row_visibility = False
1408
+
1409
+
1410
+
1411
  default_add_seed_into_pipe_field_row_visibility = False
1412
 
1413
  if is_default_config == 1:
 
1482
 
1483
 
1484
 
1485
+ #####################
1486
+ #
1487
+ # Rounded Number
1488
+ #
1489
+ # A better, and seemingly more accurate, way to round.
1490
+ #
1491
+ # https://realpython.com/python-rounding/
1492
+
1493
+ #####################
1494
+
1495
+ def rounded_number(n, decimals=0):
1496
+ n = float(n)
1497
+ multiplier = 10**decimals
1498
+ rounded_abs = (floor(abs(n) * multiplier + 0.5) / multiplier)
1499
+ rounded_value = round(copysign(rounded_abs, n), decimals)
1500
+ return rounded_value
1501
+
1502
+
1503
+
1504
+
1505
+
1506
+
1507
+
1508
+ #####################
1509
+ #
1510
+ # Formatted Number
1511
+ #
1512
+ # Format number to a certain number of decimal places and output it to a string.
1513
+ #
1514
+ # https://stackoverflow.com/questions/1995615/how-can-i-format-a-decimal-to-always-show-2-decimal-places
1515
+
1516
+ #####################
1517
+
1518
+ def formatted_number(n, decimals=0):
1519
+ rounded_value = rounded_number(n, decimals)
1520
+ formatted_value = '{:.{prec}f}'.format(rounded_value, prec=decimals)
1521
+ return formatted_value
1522
+
1523
+
1524
+
1525
+
1526
+
1527
+
1528
+
1529
  #####################
1530
  #
1531
  # Show Message
 
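A small usage sketch of the two helpers above (values chosen only for illustration); the main difference from the built-in round() is that halves are rounded away from zero rather than to the nearest even number:

# rounded_number() rounds halves away from zero; round(2.5) would return 2
print(rounded_number(2.5))         # 3.0
print(rounded_number(-2.5))        # -3.0
print(rounded_number(3.14159, 2))  # 3.14

# formatted_number() pads the result to the requested number of decimals
print(formatted_number(7, 2))      # "7.00"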
2129
  base_model_kwargs["cache_dir"] = hugging_face_cache_dir
2130
 
2131
  pipe = DiffusionPipeline.from_pretrained(
2132
+ pretrained_model_name_or_path = model_configuration_links_object[model_configuration_name_value],
2133
+ local_files_only = only_use_local_files_bool,
2134
  **base_model_kwargs
2135
  )
2136
 
 
2376
  refiner_kwargs["cache_dir"] = hugging_face_cache_dir
2377
 
2378
  refiner = DiffusionPipeline.from_pretrained(
2379
+ pretrained_model_name_or_path = hugging_face_refiner_partial_path,
2380
+ local_files_only = only_use_local_files_bool,
2381
  **refiner_kwargs
2382
  )
2383
 
 
2441
  upscaler_kwargs["cache_dir"] = hugging_face_cache_dir
2442
 
2443
  upscaler = DiffusionPipeline.from_pretrained(
2444
+ pretrained_model_name_or_path = hugging_face_upscaler_partial_path,
2445
+ local_files_only = only_use_local_files_bool,
2446
  **upscaler_kwargs
2447
  )
2448
 
 
2643
  base_model_num_inference_steps = int(base_model_num_inference_steps)
2644
  base_model_steps_field_for_sdxl_turbo = int(base_model_steps_field_for_sdxl_turbo)
2645
  actual_seed = int(actual_seed)
2646
+ refining_denoise_start_for_default_config_field_value = rounded_number(refining_denoise_start_for_default_config_field_value, 2)
2647
+ refining_denoise_start_for_online_config_field_value = rounded_number(refining_denoise_start_for_online_config_field_value, 2)
2648
  refining_steps_for_sdxl_online_config_field_value = int(refining_steps_for_sdxl_online_config_field_value)
2649
  upscaling_num_inference_steps = int(upscaling_num_inference_steps)
2650
 
 
2671
 
2672
 
2673
 
2674
+ current_estimated_total_base_model_steps = base_model_num_inference_steps
2675
+ current_estimated_total_refiner_steps = 0
2676
+ current_estimated_total_upscaler_steps = upscaling_num_inference_steps
2677
+
2678
+
2679
+
2680
+ global current_actual_total_base_model_steps
2681
+ global current_actual_total_upscaler_steps
2682
+ global current_actual_total_refiner_steps
2683
+
2684
+ current_actual_total_base_model_steps = 0
2685
  current_actual_total_refiner_steps = 0
2686
+ current_actual_total_upscaler_steps = 0
2687
 
2688
 
2689
 
 
2691
 
2692
  negative_prompt_text = ""
2693
  base_model_num_inference_steps = base_model_steps_field_for_sdxl_turbo
2694
+ current_estimated_total_base_model_steps = base_model_num_inference_steps
2695
  guidance_scale = 0
2696
 
2697
 
 
2749
  refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value)
2750
 
2751
 
2752
+
2753
  refining_use_denoising_start_in_base_model_when_using_refiner_field_value = numerical_bool(refining_use_denoising_start_in_base_model_when_using_refiner_field_value)
2754
  refining_base_model_output_to_refiner_is_in_latent_space_field_value = numerical_bool(refining_base_model_output_to_refiner_is_in_latent_space_field_value)
2755
 
 
2998
 
2999
 
3000
 
3001
+ denoising_end_applicable = 0
3002
+
3003
+
3004
+
3005
  if show_image_creation_progress_log == 1:
3006
 
3007
 
 
3055
 
3056
  base_model_progress_text = "Base model processing started"
3057
 
3058
+ global current_actual_total_base_model_steps
3059
+
3060
+ current_actual_total_base_model_steps += 1
3061
+
3062
  progress(
3063
  progress = (
3064
  callback_step_index,
3065
+ current_estimated_total_base_model_steps
3066
  ),
3067
  desc = base_model_progress_text,
3068
  unit = "base model steps"
 
3123
 
3124
  refiner_progress_text = "Refiner processing started"
3125
 
3126
+ global current_actual_total_refiner_steps
3127
+
3128
+ current_actual_total_refiner_steps += 1
3129
+
3130
  progress(
3131
  progress = (
3132
  callback_step_index,
3133
+ current_estimated_total_refiner_steps
3134
  ),
3135
  desc = refiner_progress_text,
3136
  unit = "refiner steps"
 
3190
 
3191
  upscaler_progress_text = "Upscaler processing started"
3192
 
3193
+ global current_actual_total_upscaler_steps
3194
+
3195
+ current_actual_total_upscaler_steps += 1
3196
+
3197
  progress(
3198
  progress = (
3199
  callback_step_index,
3200
+ current_estimated_total_upscaler_steps
3201
  ),
3202
  desc = upscaler_progress_text,
3203
  unit = "upscaler steps"
 
3268
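The three hunks above share one pattern: the step callback increments a module-level "actual" counter while the progress bar is driven by the precomputed "estimated" total, so the log can later report how many steps really ran. A minimal sketch of that pattern with a hypothetical callback name (only the two counter variables come from this commit), assuming the callback_on_step_end interface of recent diffusers versions:

current_actual_total_base_model_steps = 0
current_estimated_total_base_model_steps = 25  # example value only

def callback_to_do_for_base_model_progress(pipe, step_index, timestep, callback_kwargs):

    # count the steps that actually run, independently of the estimate
    global current_actual_total_base_model_steps
    current_actual_total_base_model_steps += 1

    # the real callbacks above also call progress((step_index,
    # current_estimated_total_base_model_steps), ...) to update the page

    return callback_kwargs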
 
3269
  upscaling_num_inference_steps = 5
3270
 
3271
+ current_estimated_total_upscaler_steps = upscaling_num_inference_steps
3272
 
3273
  if show_messages_in_command_prompt == 1:
3274
 
 
3308
  desc = "Refining is beginning"
3309
  )
3310
 
3311
+ current_estimated_total_refiner_steps = rounded_number(refining_steps_for_sdxl_online_config_field_value - (refining_steps_for_sdxl_online_config_field_value * refining_denoise_start_for_online_config_field_value))
3312
+
3313
+ if current_estimated_total_refiner_steps < 1:
3314
+
3315
+ current_estimated_total_refiner_steps = 1
3316
 
3317
  nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value)
3318
 
 
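A worked example of the estimate above, with illustrative numbers: 25 refiner iterations and a denoise start of 0.70 leave roughly the last 30% of the schedule to the refiner, so the estimate is 25 - (25 * 0.70) = 7.5, which rounded_number() turns into 8; the guard keeps the estimate at a minimum of 1 when the denoise start is close to 1.0.

# illustrative numbers only
steps = 25
denoise_start = 0.70
estimate = rounded_number(steps - (steps * denoise_start))  # 7.5 -> 8.0
if estimate < 1:
    estimate = 1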
3320
  "Refiner? Yes",
3321
  "Refiner denoise start %: " + nice_refiner_denoise_start,
3322
  "Refiner number of iterations: " + str(refining_steps_for_sdxl_online_config_field_value),
3323
+ "Actual Refining Steps: " + formatted_number(current_actual_total_refiner_steps)
3324
  ]
3325
 
3326
  image = refiner(
 
3424
 
3425
  default_steps_in_diffusers = 50
3426
 
3427
+ current_estimated_total_refiner_steps = rounded_number(default_steps_in_diffusers - (default_steps_in_diffusers * refining_denoise_start_for_online_config_field_value))
3428
 
3429
+ if current_estimated_total_refiner_steps < 1:
3430
+
3431
+ current_estimated_total_refiner_steps = 1
 
 
 
3432
 
3433
  image = refiner(
3434
  Prompt,
 
3439
  callback_on_step_end=callback_to_do_for_refiner_progress
3440
  ).images[0]
3441
 
3442
+ nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value)
3443
+
3444
+ refiner_info_for_info_about_prompt_lines_array = [
3445
+ "Refiner? Yes",
3446
+ "Refiner denoise start %: " + nice_refiner_denoise_start,
3447
+ "Refiner number of iterations: " + str(default_steps_in_diffusers),
3448
+ "Actual Refining Steps: " + formatted_number(current_actual_total_refiner_steps)
3449
+ ]
3450
+
3451
  else:
3452
 
3453
  if show_messages_in_command_prompt == 1:
 
3486
 
3487
 
3488
 
3489
+ if base_model_name_value not in base_models_not_supporting_denoising_end_for_base_model_object:
3490
+
3491
+ denoising_end_applicable = 1
3492
+
3493
+
3494
+
3495
  #
3496
  #
3497
  #
 
3502
 
3503
  if use_refiner == 1:
3504
 
3505
+ if (
3506
+ (refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1) and
3507
+ (denoising_end_applicable == 1)
3508
+ ):
3509
 
3510
  denoising_end = refining_denoise_start_for_default_config_field_value
3511
 
3512
+ current_estimated_total_base_model_steps = rounded_number(base_model_num_inference_steps * refining_denoise_start_for_default_config_field_value)
3513
+
3514
+ if current_estimated_total_base_model_steps < 1:
3515
+
3516
+ current_estimated_total_base_model_steps = 1
3517
 
3518
  else:
3519
 
 
3525
 
3526
  output_type_before_refiner = "latent"
3527
 
3528
+ current_estimated_total_refiner_steps = rounded_number(base_model_num_inference_steps - (base_model_num_inference_steps * refining_denoise_start_for_default_config_field_value))
3529
 
3530
+ if current_estimated_total_refiner_steps < 1:
 
3531
 
3532
+ current_estimated_total_refiner_steps = 1
3533
 
3534
  if use_upscaler == 1:
3535
 
 
3659
 
3660
  image_to_return = refined_image
3661
 
3662
+
3663
+
3664
+ refiner_info_for_info_about_prompt_lines_array = [
3665
+ "Refiner? Yes"
3666
+ ]
3667
+
3668
+ nice_refiner_denoise_start = str(refining_denoise_start_for_default_config_field_value)
3669
+
3670
+ if denoising_end_applicable == 1:
3671
+
3672
+ if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1:
3673
+
3674
+ refiner_info_for_info_about_prompt_lines_array.extend([
3675
+ "Set \"denoising_end\" in base model generation? Yes",
3676
+ "Base model denoise end %: " + nice_refiner_denoise_start,
3677
+ "Actual Base Model Steps: " + formatted_number(current_actual_total_base_model_steps)
3678
+ ])
3679
+
3680
+ else:
3681
+
3682
+ refiner_info_for_info_about_prompt_lines_array.extend([
3683
+ "Set \"denoising_end\" in base model generation? No",
3684
+ ])
3685
+
3686
+ refiner_info_for_info_about_prompt_lines_array.extend([
3687
+ "Refiner denoise start %: " + nice_refiner_denoise_start,
3688
+ "Actual Refining Steps: " + formatted_number(current_actual_total_refiner_steps)
3689
+ ])
3690
+
3691
+ if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1:
3692
+
3693
+ refiner_info_for_info_about_prompt_lines_array.extend([
3694
+ "Base model output in latent space before refining? Yes",
3695
+ ])
3696
+
3697
+ else:
3698
+
3699
+ refiner_info_for_info_about_prompt_lines_array.extend([
3700
+ "Base model output in latent space before refining? No",
3701
+ ])
3702
+
3703
+
3704
+
3705
  else:
3706
 
3707
  if use_upscaler == 1:
 
3958
 
3959
  info_about_prompt = "\n".join(info_about_prompt_lines_array)
3960
 
3961
+ output_text_field_update = gr.Textbox(
3962
+ value = info_about_prompt,
3963
+ lines = 12,
3964
+ )
3965
 
3966
 
3967
 
 
4027
  saved_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt"
4028
 
4029
  prompt_info_file_handle = open(saved_text_file_path_and_file, "w")
4030
+ prompt_info_file_handle.writelines(info_about_prompt)
4031
  prompt_info_file_handle.close()
4032
 
4033
 
 
4037
  )
4038
 
4039
  image_gallery_array_state_value.insert(0, image_to_return)
4040
+ prompt_information_array_state_value.insert(0, info_about_prompt)
4041
 
4042
  output_image_gallery_field_update = gr.Gallery(
4043
  value = image_gallery_array_state_value,
 
4119
  # Download Data From HuggingFace
4120
  #
4121
  # This will download a lot of data at once rather than waiting until you
4122
+ # use each model.
4123
  #
4124
  #####################
4125
 
 
4659
  # Hide border when yield is used:
4660
  # https://github.com/gradio-app/gradio/issues/5479
4661
  # .generating {border: none !important;}
4662
+ # I'm not currently using yield.
4663
+
4664
+ css_to_use = """
4665
 
4666
+ footer
4667
+ {
4668
+ display: none !important;
4669
+ }
4670
+
4671
+ #output_text_field_id textarea
4672
+ {
4673
+ resize: none;
4674
+ overflow-y: auto !important;
4675
+ }
4676
+
4677
+ #output_text_field_id textarea::-webkit-scrollbar
4678
+ {
4679
+ width: 15px;
4680
+ }
4681
+
4682
+ #output_text_field_id textarea::-webkit-scrollbar-track
4683
+ {
4684
+ background-color: rgb(245, 245, 245);
4685
+ border: 0px;
4686
+ }
4687
+
4688
+ #output_text_field_id textarea::-webkit-scrollbar-track:hover
4689
+ {
4690
+ background-color: rgb(242, 242, 242);
4691
+ }
4692
+
4693
+ #output_text_field_id textarea::-webkit-scrollbar-thumb
4694
+ {
4695
+ background-color: rgb(214, 214, 214);
4696
+ width: 100%;
4697
+ height: 60px;
4698
+ max-height: 80%;
4699
+ border: 1px solid rgb(224, 224, 224);
4700
+ }
4701
+
4702
+ #output_text_field_id textarea::-webkit-scrollbar-thumb:hover
4703
+ {
4704
+ background-color: rgb(184, 184, 184);
4705
+ }
4706
+
4707
+ a:hover {
4708
+ background-color: yellow;
4709
+ }
4710
+
4711
+ """
4712
 
4713
  with gr.Blocks(
4714
  title = "Spaghetti AI",
 
4825
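The css_to_use string above replaces the old one-line footer rule. It is presumably still handed to Gradio through the css argument of gr.Blocks, which is the standard way to attach custom CSS to a Blocks app; a minimal, self-contained sketch of that wiring (shortened CSS, hypothetical layout):

import gradio as gr

css_to_use = "footer { display: none !important; }"  # shortened for the sketch

with gr.Blocks(
    title = "Spaghetti AI",
    css = css_to_use
) as sketch_app:
    output_text_field = gr.Textbox(elem_id = "output_text_field_id", lines = 6)

# sketch_app.launch()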
  step = refiner_denoise_start_input_slider_steps
4826
  )
4827
 
4828
+ with gr.Row(
4829
+ elem_id = "refining_use_denoising_start_in_base_model_when_using_refiner_field_row_id",
4830
+ visible = default_refining_use_denoising_start_in_base_model_when_using_refiner_field_row_visibility
4831
+ ):
4832
 
4833
  refining_use_denoising_start_in_base_model_when_using_refiner_field = gr.Checkbox(
4834
  label = "Use \"denoising_start\" value as \"denoising_end\" value in base model generation when using refiner",
 
5236
  with gr.Row():
5237
 
5238
  output_text_field = gr.Textbox(
5239
+ elem_id = "output_text_field_id",
5240
  label = "Prompt Information:",
5241
  value = "After an image is generated, its generation information will appear here. All of this information is also embedded in the image itself. If you open the image in a text program, it will appear at the top." + additional_prompt_info_html,
5242
  show_copy_button = True,
5243
+ lines = 6,
5244
+ # max_lines = 20,
5245
+ autoscroll = False,
5246
+ interactive = False,
5247
  container = True
5248
  )
5249
 
 
5424
  var onlineConfigurationsObject = {3};
5425
  var modelConfigurationForceRefinerObject = {4};
5426
  var modelConfigurationIncludeRefinerNumberOfStepsObject = {5};
5427
+ var baseModelsNotSupportingDenoisingEndForBaseModelObject = {6};
5428
+ var modelConfigurationHideUpscalerStepsObject = {7};
5429
 
5430
+ var allowOnlineConfigurations = {8};
5431
 
5432
  var baseModelFullNamesToBaseModelIdConversion = {{}};
5433
  Object.keys(baseModelNamesObject).forEach(key => {{
 
5565
 
5566
  }}
5567
 
5568
+ var refiningStepsForSdxlOnlineConfigFieldDisplay = "none";
5569
 
5570
  if (Object.keys(modelConfigurationIncludeRefinerNumberOfStepsObject).includes(modelConfigurationNameValue)) {{
5571
 
5572
+ refiningStepsForSdxlOnlineConfigFieldDisplay = "block";
5573
+
5574
+ }}
5575
+
5576
+ var refiningUseDenoisingStartInBaseModelWhenUsingRefinerFieldDisplay = "block";
5577
+
5578
+ if (Object.keys(baseModelsNotSupportingDenoisingEndForBaseModelObject).includes(baseModelFieldValue)) {{
5579
+
5580
+ refiningUseDenoisingStartInBaseModelWhenUsingRefinerFieldDisplay = "none";
5581
 
5582
  }}
5583
 
 
5592
  document.getElementById("refining_selection_automatically_selected_message_field_id").style.display = refiningSelectionAutomaticallySelectedMessageFieldDisplay;
5593
  document.getElementById("refining_selection_online_config_normal_field_id").style.display = refiningSelectionOnlineConfigNormalFieldDisplay;
5594
  document.getElementById("refining_selection_online_config_automatically_selected_field_id").style.display = refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay;
5595
+ document.getElementById("refining_steps_for_sdxl_online_config_field_row_id").style.display = refiningStepsForSdxlOnlineConfigFieldDisplay;
5596
+ document.getElementById("refining_use_denoising_start_in_base_model_when_using_refiner_field_row_id").style.display = refiningUseDenoisingStartInBaseModelWhenUsingRefinerFieldDisplay;
5597
  document.getElementById("upscaling_num_inference_steps_field_row_id").style.display = upscalingNumInferenceStepsFieldDisplay;
5598
 
5599
  }}
 
5607
  online_configurations_object,
5608
  model_configuration_force_refiner_object,
5609
  model_configuration_include_refiner_number_of_steps_object,
5610
+ base_models_not_supporting_denoising_end_for_base_model_object,
5611
  model_configuration_hide_upscaler_steps_object,
5612
  allow_online_configurations
5613
  )
 
5868
 
5869
  window.modelConfigurationDropdownFieldValuesObject = {{{0}}};
5870
 
5871
+ document.querySelector("#output_text_field_id textarea").classList.remove("scroll-hide");
5872
+
5873
  }}
5874
 
5875
  """.format(