---
base_model: stabilityai/stable-diffusion-2
library_name: diffusers
license: openrail++
tags:
- text-to-image
- diffusers-training
- diffusers
- stable-diffusion-2
- stable-diffusion-2-diffusers
instance_prompt: <leaf microstructure>
widget: []
---
# Stable Diffusion 2.x Fine-tuned with Leaf Images
## Model description
These are fine-tuned weights for the `stabilityai/stable-diffusion-2` model. This is a full fine-tune of the model using DreamBooth.
## Trigger keywords
The following images were used during fine-tuning with the keyword `<leaf microstructure>`:
![image/png](https://cdn-uploads.huggingface.co/production/uploads/623ce1c6b66fedf374859fe7/sI_exTnLy6AtOFDX1-7eq.png)
You should include `<leaf microstructure>` in your prompt to trigger the fine-tuned concept during image generation.
## How to use
Defining some helper functions:
```python
from diffusers import DiffusionPipeline
import torch
import os
from datetime import datetime
from PIL import Image

def generate_filename(base_name, extension=".png"):
    # Timestamped filename so repeated runs do not overwrite earlier outputs
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    return f"{base_name}_{timestamp}{extension}"

def save_image(image, directory, base_name="image_grid"):
    # Save a PIL image to the given directory with a timestamped filename
    filename = generate_filename(base_name)
    file_path = os.path.join(directory, filename)
    image.save(file_path)
    print(f"Image saved as {file_path}")

def image_grid(imgs, rows, cols, save=True, save_dir='generated_images', base_name="image_grid",
               save_individual_files=False):
    # Arrange a list of PIL images into a rows x cols grid and optionally save
    # the grid (and each individual image) to disk
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    assert len(imgs) == rows * cols

    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
        if save_individual_files:
            save_image(img, save_dir, base_name=base_name + f'_{i}-of-{len(imgs)}_')

    if save and save_dir:
        save_image(grid, save_dir, base_name)

    return grid
```
### Text-to-image
Model loading:
```python
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

repo_id = 'lamm-mit/SD2x-leaf-inspired'

# Load the fine-tuned pipeline with the DPM-Solver multistep scheduler from the repository
pipe = StableDiffusionPipeline.from_pretrained(
    repo_id,
    scheduler=DPMSolverMultistepScheduler.from_pretrained(repo_id, subfolder="scheduler"),
    torch_dtype=torch.float16,
).to("cuda")
```
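If you are running on a GPU with limited memory, you can optionally enable attention slicing before generating images. This is a standard `diffusers` feature and is not required for this model; it simply trades a little speed for lower VRAM usage:
```python
# Optional: reduce peak VRAM usage at a small cost in speed (standard diffusers option)
pipe.enable_attention_slicing()
```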
Image generation:
```python
prompt = "a vase that resembles a <leaf microstructure>, high quality"
num_samples = 4
num_rows = 4

all_images = []
for _ in range(num_rows):
    images = pipe(prompt, num_images_per_prompt=num_samples, num_inference_steps=50, guidance_scale=15).images
    all_images.extend(images)

grid = image_grid(all_images, num_rows, num_samples)
grid
```
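If you want reproducible samples, you can optionally pass a seeded generator to the pipeline call, continuing from the block above (`generator` is a standard `diffusers` argument; the seed value here is arbitrary):
```python
# Optional: fix the random seed so the same prompt reproduces the same images
generator = torch.Generator(device="cuda").manual_seed(42)
images = pipe(prompt, num_images_per_prompt=num_samples, num_inference_steps=50,
              guidance_scale=15, generator=generator).images
```
The `image_grid` helper defined earlier also accepts `save_individual_files=True` if you want each sample written to disk in addition to the grid.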
![image/png](https://cdn-uploads.huggingface.co/production/uploads/623ce1c6b66fedf374859fe7/SI5aYv2dygJn0Y12LIqqe.png)
## Fine-tuning script
Download this script: [SD2x DreamBooth-Fine-Tune.ipynb](https://huggingface.co/lamm-mit/SD2x-leaf-inspired/resolve/main/SD2x_DreamBooth_Fine-Tune.ipynb)
You need to create a local folder `leaf_concept_dir` and add the leaf images (provided in this repository, see subfolder), like so:
```python
import os
import requests
from io import BytesIO
from PIL import Image

def download_image(url):
    # download_image is not defined in this card; a minimal implementation is given here
    try:
        response = requests.get(url)
    except requests.exceptions.RequestException:
        return None
    return Image.open(BytesIO(response.content)).convert("RGB")

save_path = 'leaf_concept_dir'
urls = [
    "https://www.dropbox.com/scl/fi/4s09djm4nqxmq6vhvv9si/13_.jpg?rlkey=3m2f90pjofljmlqg5uc722i6y&dl=1",
    "https://www.dropbox.com/scl/fi/w4jsrf0qmrcro37nxutbx/25_.jpg?rlkey=e52gnoqaar33kwrd01h1mwcnk&dl=1",
    "https://www.dropbox.com/scl/fi/x0xgavduor4cbxz0sdcd2/33_.jpg?rlkey=5htaicapahhn66wnsr23v1nxz&dl=1",
    "https://www.dropbox.com/scl/fi/2grt40acypah9h9ok607q/72_.jpg?rlkey=bl6vfv0rcas2ygsz6o3behlst&dl=1",
    "https://www.dropbox.com/scl/fi/ecaf9agzdj2cawspmyt5i/117_.jpg?rlkey=oqxyk9i1wtu1wtkqadd6ylyjj&dl=1",
    "https://www.dropbox.com/scl/fi/gw3p73r99fleozr6ckfa3/126_.jpg?rlkey=6n7kqaklczshht1ntyqunh2lt&dl=1",
    # You can add additional images here
]

# Download the images, drop any that failed, and write them into the concept folder
images = list(filter(None, [download_image(url) for url in urls]))
if not os.path.exists(save_path):
    os.mkdir(save_path)
[image.save(f"{save_path}/{i}.jpeg") for i, image in enumerate(images)]
image_grid(images, 1, len(images))
```
The training script is included in the Jupyter notebook.
## More examples
```python
prompt = "a conch shell on black background that resembles a <leaf microstructure>, high quality"
num_samples = 4
num_rows = 4

all_images = []
for _ in range(num_rows):
    images = pipe(prompt, num_images_per_prompt=num_samples, num_inference_steps=50, guidance_scale=15).images
    all_images.extend(images)

grid = image_grid(all_images, num_rows, num_samples)
grid
```
![image/png](https://cdn-uploads.huggingface.co/production/uploads/623ce1c6b66fedf374859fe7/eE1xBqyVA4sP4gx6tAEGc.png)
![image/png](https://cdn-uploads.huggingface.co/production/uploads/623ce1c6b66fedf374859fe7/Ga808aW5H27f0hPq_RNme.png)
![image/png](https://cdn-uploads.huggingface.co/production/uploads/623ce1c6b66fedf374859fe7/r0dUyA-Gh_biy5d-4lTl0.png)
![image/png](https://cdn-uploads.huggingface.co/production/uploads/623ce1c6b66fedf374859fe7/iEjozBWOQQwxNVuKWZ7TT.png)
![image/png](https://cdn-uploads.huggingface.co/production/uploads/623ce1c6b66fedf374859fe7/ESvd6cCkyJZ52Cu3iYfoP.png)
![image/png](https://cdn-uploads.huggingface.co/production/uploads/623ce1c6b66fedf374859fe7/2FExqoj8TSjJoIiw4wCm6.png)
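The examples above use a fairly high guidance scale (15). If you want to see how strongly the prompt conditions the output, you can optionally sweep a few values of the standard `guidance_scale` parameter and save each result with the helper defined earlier (the values below are arbitrary):
```python
# Optional: compare different guidance scales for the same prompt (values are arbitrary)
prompt = "a conch shell on black background that resembles a <leaf microstructure>, high quality"
for gs in [7.5, 10.0, 15.0]:
    image = pipe(prompt, num_inference_steps=50, guidance_scale=gs).images[0]
    save_image(image, "generated_images", base_name=f"conch_gs_{gs}")
```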