ozgurkara committed on
Commit
2536180
1 Parent(s): e4a5521
.gitignore CHANGED
@@ -1,4 +1,5 @@
  results/*
  pretrained_models/*
  gradio_cached_examples/*
- generated/*
+ generated/*
+ CIVIT_AI/diffusers_models/*
CIVIT_AI/convert.py ADDED
@@ -0,0 +1,182 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Conversion script for the LDM checkpoints. """
+
+ import argparse
+ import importlib
+
+ import torch
+
+ from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument(
+         "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
+     )
+     # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
+     parser.add_argument(
+         "--original_config_file",
+         default=None,
+         type=str,
+         help="The YAML config file corresponding to the original architecture.",
+     )
+     parser.add_argument(
+         "--num_in_channels",
+         default=None,
+         type=int,
+         help="The number of input channels. If `None` number of input channels will be automatically inferred.",
+     )
+     parser.add_argument(
+         "--scheduler_type",
+         default="pndm",
+         type=str,
+         help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
+     )
+     parser.add_argument(
+         "--pipeline_type",
+         default=None,
+         type=str,
+         help=(
+             "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
+             ". If `None` pipeline will be automatically inferred."
+         ),
+     )
+     parser.add_argument(
+         "--image_size",
+         default=None,
+         type=int,
+         help=(
+             "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
+             " Base. Use 768 for Stable Diffusion v2."
+         ),
+     )
+     parser.add_argument(
+         "--prediction_type",
+         default=None,
+         type=str,
+         help=(
+             "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
+             " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
+         ),
+     )
+     parser.add_argument(
+         "--extract_ema",
+         action="store_true",
+         help=(
+             "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
+             " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
+             " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
+         ),
+     )
+     parser.add_argument(
+         "--upcast_attention",
+         action="store_true",
+         help=(
+             "Whether the attention computation should always be upcasted. This is necessary when running stable"
+             " diffusion 2.1."
+         ),
+     )
+     parser.add_argument(
+         "--from_safetensors",
+         action="store_true",
+         help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
+     )
+     parser.add_argument(
+         "--to_safetensors",
+         action="store_true",
+         help="Whether to store pipeline in safetensors format or not.",
+     )
+     parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
+     parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
+     parser.add_argument(
+         "--stable_unclip",
+         type=str,
+         default=None,
+         required=False,
+         help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
+     )
+     parser.add_argument(
+         "--stable_unclip_prior",
+         type=str,
+         default=None,
+         required=False,
+         help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
+     )
+     parser.add_argument(
+         "--clip_stats_path",
+         type=str,
+         help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
+         required=False,
+     )
+     parser.add_argument(
+         "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
+     )
+     parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
+     parser.add_argument(
+         "--vae_path",
+         type=str,
+         default=None,
+         required=False,
+         help="Set to a path, hub id to an already converted vae to not convert it again.",
+     )
+     parser.add_argument(
+         "--pipeline_class_name",
+         type=str,
+         default=None,
+         required=False,
+         help="Specify the pipeline class name",
+     )
+
+     args = parser.parse_args()
+
+     if args.pipeline_class_name is not None:
+         library = importlib.import_module("diffusers")
+         class_obj = getattr(library, args.pipeline_class_name)
+         pipeline_class = class_obj
+     else:
+         pipeline_class = None
+
+     pipe = download_from_original_stable_diffusion_ckpt(
+         checkpoint_path=args.checkpoint_path,
+         original_config_file=args.original_config_file,
+         # config_files=args.config_files,
+         image_size=args.image_size,
+         prediction_type=args.prediction_type,
+         model_type=args.pipeline_type,
+         extract_ema=args.extract_ema,
+         scheduler_type=args.scheduler_type,
+         num_in_channels=args.num_in_channels,
+         upcast_attention=args.upcast_attention,
+         from_safetensors=args.from_safetensors,
+         device=args.device,
+         stable_unclip=args.stable_unclip,
+         stable_unclip_prior=args.stable_unclip_prior,
+         clip_stats_path=args.clip_stats_path,
+         controlnet=args.controlnet,
+         vae_path=args.vae_path,
+         pipeline_class=pipeline_class,
+     )
+
+     if args.half:
+         pipe.to(torch_dtype=torch.float16)
+
+     if args.controlnet:
+         # only save the controlnet model
+         pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
+     else:
+         pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
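For reference, CIVIT_AI/convert.py is the standard diffusers LDM-to-diffusers conversion CLI. A minimal programmatic sketch of the same conversion is shown below; the checkpoint path, output directory, and choice of flags are illustrative placeholders, not files or settings that ship with this commit.

import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

# Convert a downloaded CivitAI .safetensors checkpoint (hypothetical path) into a
# diffusers pipeline folder, mirroring what convert.py does with its CLI flags.
pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="CIVIT_AI/my_checkpoint.safetensors",  # hypothetical input
    from_safetensors=True,   # same as passing --from_safetensors
    extract_ema=True,        # EMA weights usually give better inference quality
    scheduler_type="pndm",   # the script's default scheduler
)
pipe.to(torch_dtype=torch.float16)  # optional, equivalent to --half
pipe.save_pretrained(
    "CIVIT_AI/diffusers_models/my_checkpoint",  # hypothetical --dump_path
    safe_serialization=True,                    # equivalent to --to_safetensors
)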
__pycache__/app.cpython-38.pyc CHANGED
Binary files a/__pycache__/app.cpython-38.pyc and b/__pycache__/app.cpython-38.pyc differ
 
app.py CHANGED
@@ -76,7 +76,6 @@ def run(*args):
  num_inversion_step = 20
  cond_step_start = 0.0
  give_control_inversion = True
- model_id = 'SD 1.5'
  inversion_prompt = ''
  save_folder = ''
  list_of_inputs = [x for x in args]
@@ -111,7 +110,7 @@ def run(*args):
  input_ns.save_folder = save_folder

  input_ns.seed = list_of_inputs[11]
- input_ns.model_id = const.MODEL_IDS[model_id]
+ input_ns.model_id = const.MODEL_IDS[list_of_inputs[12]]
  # input_ns.width = list_of_inputs[23]
  # input_ns.height = list_of_inputs[24]
  # input_ns.original_size = list_of_inputs[25]
@@ -123,7 +122,6 @@ def run(*args):
  if str(input_ns.model_id) != 'None':
      input_ns.model_id = install_civitai_model(input_ns.model_id)

-
  device = init_device()
  input_ns = init_paths(input_ns)

@@ -195,8 +193,6 @@ with block:
  </h2>
  </div>
  """)
- with gr.Row():
-     gr.Markdown('## RAVE: Randomized Noise Shuffling for Fast and Consistent Video Editing with Diffusion Models')
  with gr.Row():
  with gr.Column():
  with gr.Row():
@@ -254,6 +250,9 @@ with block:
  with gr.Row():
  positive_prompts = gr.Textbox(label='Positive prompts')
  negative_prompts = gr.Textbox(label='Negative prompts')
+ model_id = gr.Dropdown(const.MODEL_IDS,
+                        label='Model id',
+                        value='SD 1.5')
  with gr.Row():
  preprocess_list = ['depth_zoe', 'lineart_realistic', 'lineart_standard', 'softedge_hed']
  preprocess_name = gr.Dropdown(preprocess_list,
@@ -309,7 +308,7 @@ with block:
  step=1)


- inputs = [input_path, preprocess_name, controlnet_conditioning_scale, controlnet_guidance_end, controlnet_guidance_start, grid_size, sample_size, pad, guidance_scale, negative_prompts, positive_prompts, seed]
+ inputs = [input_path, preprocess_name, controlnet_conditioning_scale, controlnet_guidance_end, controlnet_guidance_start, grid_size, sample_size, pad, guidance_scale, negative_prompts, positive_prompts, seed, model_id]

  run_button.click(fn=run,
  inputs=inputs,
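The app.py change drops the hardcoded model_id = 'SD 1.5', adds a "Model id" dropdown next to the prompt boxes, appends it to the inputs list, and resolves the selection inside run via const.MODEL_IDS. The snippet below is a minimal, self-contained sketch of that wiring pattern; it assumes Gradio 3.x, and the trimmed MODEL_IDS mapping and stub run callback are illustrative, not the app's actual code.

import gradio as gr

# Illustrative subset of const.MODEL_IDS: display name -> CivitAI model id,
# with 'None' meaning "keep the stock SD 1.5 weights".
MODEL_IDS = {
    'Realistic Vision V6.0': '245598',
    'SD 1.5': 'None',
}

def run(positive_prompts, model_choice):
    # The callback receives the dropdown's selected key and resolves it to an id,
    # mirroring `input_ns.model_id = const.MODEL_IDS[list_of_inputs[12]]` in app.py.
    model_id = MODEL_IDS[model_choice]
    return f"prompt={positive_prompts!r}, civitai_id={model_id}"

with gr.Blocks() as demo:
    positive_prompts = gr.Textbox(label='Positive prompts')
    model_id = gr.Dropdown(list(MODEL_IDS.keys()), label='Model id', value='SD 1.5')
    output = gr.Textbox(label='Resolved selection')
    run_button = gr.Button('Run')
    run_button.click(fn=run, inputs=[positive_prompts, model_id], outputs=output)

# demo.launch()  # uncomment to serve the demo locally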
utils/__pycache__/constants.cpython-38.pyc CHANGED
Binary files a/utils/__pycache__/constants.cpython-38.pyc and b/utils/__pycache__/constants.cpython-38.pyc differ
 
utils/constants.py CHANGED
@@ -27,15 +27,15 @@ PREPROCESSOR_DICT = {
  }

  MODEL_IDS = {
- 'Realistic Vision V5.1': '130072',
+ # 'Realistic Vision V5.1': '130072',
  'Realistic Vision V6.0' : '245598',
  'MajicMIXRealisticV7' : '176425',
  'DreamShaper' : '128713',
  'EpicPhotoGasm' : '223670',
- 'DivineEleganceMix (Anime)': '238656',
+ # 'DivineEleganceMix (Anime)': '238656',
  'GhostMix (Anime)': '76907',
- 'CetusMix (Anime)': '105924',
- 'Counterfeit (Anime)': '57618',
+ # 'CetusMix (Anime)': '105924',
+ # 'Counterfeit (Anime)': '57618',
  'SD 1.5': 'None'
  }

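In the updated MODEL_IDS, each CivitAI entry maps a display name to a version id, while 'SD 1.5' maps to the string 'None'. That sentinel is what the `if str(input_ns.model_id) != 'None'` guard in app.py keys on, so selecting the base model never triggers a CivitAI download. A small sketch of that resolution flow follows; the install_civitai_model stub only stands in for the app's real downloader, and the returned path is hypothetical.

MODEL_IDS = {
    'Realistic Vision V6.0': '245598',
    'SD 1.5': 'None',  # sentinel: keep stock SD 1.5, skip any CivitAI download
}

def install_civitai_model(model_id: str) -> str:
    # Stub for the app's real helper, which would download the CivitAI checkpoint
    # and convert it into a diffusers folder; the path below is hypothetical.
    return f"CIVIT_AI/diffusers_models/{model_id}"

def resolve_model(choice: str) -> str:
    model_id = MODEL_IDS[choice]
    if str(model_id) != 'None':  # mirrors the guard in app.py
        model_id = install_civitai_model(model_id)
    return model_id

print(resolve_model('SD 1.5'))                 # -> 'None'
print(resolve_model('Realistic Vision V6.0'))  # -> 'CIVIT_AI/diffusers_models/245598'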