fffiloni committed
Commit c759cbd
1 Parent(s): b1e9972

model card template update

Files changed (1):
  1. train_dreambooth_lora_sdxl.py +20 -5
train_dreambooth_lora_sdxl.py CHANGED
@@ -90,7 +90,7 @@ Last checkpoint saved: {last_checkpoint}
 
 These are LoRA adaption weights for {base_model}.
 
-The weights were trained on the concept prompt:
+The weights are currently trained on the concept prompt:
 ```
 {prompt}
 ```
@@ -157,7 +157,7 @@ datasets:
 model_card = f"""
 # LoRA DreamBooth - {repo_id}
 
-These are LoRA adaption weights for {base_model}.
+These are LoRA adaption weights for {base_model} trained on @fffiloni's SD-XL trainer.
 
 The weights were trained on the concept prompt:
 ```
@@ -166,6 +166,7 @@ The weights were trained on the concept prompt:
 Use this keyword to trigger your custom model in your prompts.
 
 LoRA for the text encoder was enabled: {train_text_encoder}.
+
 Special VAE used for training: {vae_path}.
 
 ## Usage
@@ -186,6 +187,8 @@ To just use the base model, you can run:
 import torch
 from diffusers import DiffusionPipeline, AutoencoderKL
 
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 vae = AutoencoderKL.from_pretrained('{vae_path}', torch_dtype=torch.float16)
 
 pipe = DiffusionPipeline.from_pretrained(
@@ -194,14 +197,26 @@ pipe = DiffusionPipeline.from_pretrained(
     use_safetensors=True
 )
 
-pipe.to("cuda")
+pipe.to(device)
 
 # This is where you load your trained weights
-pipe.load_lora_weights('{repo_id}')
+
+specific_safetensors = "pytorch_lora_weights.safetensors"
+lora_scale = 0.9
+
+pipe.load_lora_weights(
+    '{repo_id}',
+    weight_name=specific_safetensors,
+    # use_auth_token=True
+)
 
 prompt = "A majestic {prompt} jumping from a big stone at night"
 
-image = pipe(prompt=prompt, num_inference_steps=50).images[0]
+image = pipe(
+    prompt=prompt,
+    num_inference_steps=50,
+    cross_attention_kwargs={{"scale": lora_scale}}
+).images[0]
 ```
 """
 with open(os.path.join(repo_folder, "README.md"), "w") as f:
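
For context, here is a minimal sketch of what the generated usage snippet resolves to once the template placeholders are filled in. The repo id, base model, VAE path, and concept token below are hypothetical stand-ins for `{repo_id}`, `{base_model}`, `{vae_path}`, and `{prompt}`; they are not values from this commit.

```python
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical stand-in for {vae_path}
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)

# Hypothetical stand-in for {base_model}
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    vae=vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.to(device)

# Load the trained LoRA adapter; weight_name pins the exact safetensors file
# inside the repo ("your-username/your-sdxl-lora" is a hypothetical repo id).
pipe.load_lora_weights(
    "your-username/your-sdxl-lora",
    weight_name="pytorch_lora_weights.safetensors",
)

# Scale the LoRA contribution at inference time; 0.9 mirrors the template default.
lora_scale = 0.9
image = pipe(
    prompt="A majestic sks dog jumping from a big stone at night",
    num_inference_steps=50,
    cross_attention_kwargs={"scale": lora_scale},
).images[0]
image.save("lora_sample.png")
```

Passing `cross_attention_kwargs={"scale": lora_scale}` lets the reader of the generated README blend the adapter with the base weights at generation time without reloading, which is why the template now exposes `lora_scale` as a separate variable.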