fffiloni commited on
Commit
c2d3b3c
1 Parent(s): 7c28e5a

update model cards

Browse files
Files changed (1) hide show
  1. train_dreambooth_lora_sdxl.py +40 -3
train_dreambooth_lora_sdxl.py CHANGED
@@ -82,18 +82,55 @@ tags:
82
  - text-to-image
83
  - diffusers
84
  - lora
85
- inference: true
86
  ---
87
  """
88
  model_card = f"""
89
  # LoRA DreamBooth - {repo_id}
90
 
91
- These are LoRA adaption weights for {base_model}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. \n
92
- {img_str}
 
93
 
94
  LoRA for the text encoder was enabled: {train_text_encoder}.
95
 
96
  Special VAE used for training: {vae_path}.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  """
98
  with open(os.path.join(repo_folder, "README.md"), "w") as f:
99
  f.write(yaml + model_card)
 
82
  - text-to-image
83
  - diffusers
84
  - lora
85
+ inference: false
86
  ---
87
  """
88
  model_card = f"""
89
  # LoRA DreamBooth - {repo_id}
90
 
91
+ These are LoRA adaptation weights for {base_model}.
92
+
93
+ The weights were trained on the concept prompt: `{prompt}` using [DreamBooth](https://dreambooth.github.io/).
94
 
95
  LoRA for the text encoder was enabled: {train_text_encoder}.
96
 
97
  Special VAE used for training: {vae_path}.
98
+
99
+ ## Usage
100
+
101
+ Make sure to upgrade diffusers to >= 0.19.0:
102
+ ```
103
+ pip install diffusers --upgrade
104
+ ```
105
+
106
+ In addition, make sure to install `transformers`, `safetensors`, and `accelerate`, as well as the `invisible_watermark` package:
107
+ ```
108
+ pip install invisible_watermark transformers accelerate safetensors
109
+ ```
110
+
111
+ To just use the base model, you can run:
112
+
113
+ ```python
114
+ import torch
115
+ from diffusers import DiffusionPipeline, AutoencoderKL
116
+
117
+ vae = AutoencoderKL.from_pretrained("{vae_path}", torch_dtype=torch.float16)
118
+
119
+ pipe = DiffusionPipeline.from_pretrained(
120
+ "stabilityai/stable-diffusion-xl-base-1.0",
121
+ vae=vae, torch_dtype=torch.float16, variant="fp16",
122
+ use_safetensors=True
123
+ )
124
+
125
+ # This is where you load your trained weights
126
+ pipe.load_lora_weights("{repo_id}")
127
+
128
+ pipe.to("cuda")
129
+
130
+ prompt = "A majestic {prompt} jumping from a big stone at night"
131
+
132
+ image = pipe(prompt=prompt, num_inference_steps=50).images[0]
133
+ ```
134
  """
135
  with open(os.path.join(repo_folder, "README.md"), "w") as f:
136
  f.write(yaml + model_card)