# -*- coding: utf-8 -*-
"""SDXL-Thumbsup.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1T0tqXsscUsDxLSt6MIiqEmNVYEuCejGL
# Training DreamBooth LoRA with Stable Diffusion XL on Trump Thumbs Up Images
## Linking Drive
"""
from google.colab import drive
drive.mount('/content/drive')
import warnings
warnings.filterwarnings("ignore")
"""## Installing & Login to Hugging Face
"""
!pip install huggingface-hub
!git config --global credential.helper store
!huggingface-cli login
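# Non-interactive alternative (a sketch, not required): if the Hugging Face token
# is stored in Colab's Secrets panel, log in programmatically instead of via the
# CLI prompt. The secret name "HF_TOKEN" is an assumption; rename it to match
# whatever you saved the token as.
from google.colab import userdata
from huggingface_hub import login
try:
    login(token=userdata.get("HF_TOKEN"))
except Exception:
    print("No HF_TOKEN secret found; the interactive CLI login above is enough.")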
"""##Cloning Hugging Face/diffusers - [Repo](https://github.com/huggingface/diffusers)
"""
import os
import subprocess

# Clone the diffusers repo and install it from source in editable mode, so the
# example training scripts under examples/dreambooth are available.
subprocess.run(["git", "clone", "https://github.com/huggingface/diffusers"])
os.chdir("diffusers")
subprocess.run(["pip", "install", "-e", "."])
"""## Installing Requirements - Dreambooth SDXL"""
os.chdir("examples/dreambooth")
!pip install -r requirements_sdxl.txt
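# Optional sanity check that the main training dependencies resolved. The module
# list is an assumption about what requirements_sdxl.txt typically pulls in.
import importlib
for mod in ("torch", "transformers", "accelerate", "peft"):
    try:
        print(mod, importlib.import_module(mod).__version__)
    except ImportError:
        print(mod, "is not installed")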
"""## Write Basic Configuration for Accelerate"""
from accelerate.utils import write_basic_config
write_basic_config()
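# Optional sanity check: print the config write_basic_config() just wrote. The
# path below is the library's usual default location; adjust it if you pass a
# custom save_location.
cfg_path = os.path.expanduser("~/.cache/huggingface/accelerate/default_config.yaml")
if os.path.exists(cfg_path):
    print(open(cfg_path).read())
else:
    print("No accelerate config found at", cfg_path)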
"""## Load and Display Images from Drive"""
import os
import cv2
from matplotlib import pyplot as plt
img_dir = '/content/drive/MyDrive/SDXL/Images/thumbsup'
count = 0
max_images = 10

# Display up to max_images training images from Drive.
for img_name in os.listdir(img_dir):
    img_path = os.path.join(img_dir, img_name)
    if img_path.lower().endswith(('.png', '.jpg', '.jpeg')):
        img = cv2.imread(img_path)
        if img is None:
            print(f"Failed to load image: {img_path}")
            continue
        # OpenCV reads BGR; convert to RGB before plotting with matplotlib.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        plt.imshow(img)
        plt.axis('off')
        plt.show()
        count += 1
        if count >= max_images:
            break
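# Optional: the loop above opens one figure per image; this sketch shows the
# same files (reusing img_dir and max_images from above) in a single grid.
image_files = [f for f in os.listdir(img_dir)
               if f.lower().endswith(('.png', '.jpg', '.jpeg'))][:max_images]
if image_files:
    cols = 5
    rows = (len(image_files) + cols - 1) // cols
    fig, axes = plt.subplots(rows, cols, figsize=(15, 3 * rows))
    for ax, fname in zip(axes.flat, image_files):
        img = cv2.imread(os.path.join(img_dir, fname))
        if img is None:
            continue
        ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        ax.set_title(fname, fontsize=8)
    for ax in axes.flat:
        ax.axis('off')
    plt.tight_layout()
    plt.show()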
"""## Installing Required Libraries"""
!pip install tensorrt bitsandbytes xformers wandb
!pip install --upgrade diffusers accelerate
"""## Logging into Weights and Biases"""
!wandb login
"""## Train DreamBooth LoRA Model with Stable Diffusion XL"""
!accelerate launch train_dreambooth_lora_sdxl.py \
--pretrained_model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0" \
--instance_data_dir="/content/drive/MyDrive/SDXL/Images/thumbsup" \
--pretrained_vae_model_name_or_path="stabilityai/sdxl-vae" \
--output_dir="/content/drive/MyDrive/SDXL/Output-Complex" \
--mixed_precision="fp16" \
--instance_prompt="a high-quality photo of Trump showing thumbs up" \
--resolution=1024 \
--train_batch_size=1 \
--gradient_accumulation_steps=2 \
--learning_rate=2e-4 \
--lr_scheduler="constant_with_warmup" \
--lr_warmup_steps=0 \
--max_train_steps=500 \
--validation_prompt="A high-quality photo of Trump showing thumbs up in a taco restaurant, detailed, sharp focus" \
--validation_epochs=15 \
--seed="42" \
--push_to_hub \
--gradient_checkpointing \
--checkpointing_steps=100 \
--use_8bit_adam \
--prior_loss_weight=0.8 \
--num_class_images=10 \
--report_to="wandb"
# Return to /content so the editable reinstall below can find ./diffusers.
os.chdir("/content")
!pip uninstall -y diffusers
!pip install -e ./diffusers
"""## Load LoRA Weights and Generate Images
"""
from huggingface_hub.repocard import RepoCard
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
pipeline = pipeline.to("cuda")
pipeline.load_lora_weights("/content/diffusers/examples/dreambooth/pytorch_lora_weights.safetensors")
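# Alternative (commented out): since training ran with --push_to_hub, the LoRA
# can also be loaded straight from the Hub instead of the local file. The repo
# id below is an assumption based on the username/repo used at the end of this
# notebook; replace it with the repo the training script actually pushed to.
# pipeline.load_lora_weights("Paresh1879/stable-diffusion-xl-thumbsup-extend")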
image = pipeline("A high quality picture of Trump showing the thumbs up in Paris", num_inference_steps=50).images[0]
image_path = "/content/drive/MyDrive/SDXL/Output-Complex/Trump1.png"
image.save(image_path)
print("Image saved at:", image_path)
image = pipeline("A picture of Trump showing the thumbs up as a Anime character, detailed, sharp focus", num_inference_steps=50).images[0]
image_path = "/content/drive/MyDrive/SDXL/Output-Complex/Trump2.png"
image.save(image_path)
print("Image saved at:", image_path)
image = pipeline("A picture of Trump showing thumbsup in whitehouse", num_inference_steps=50).images[0]
image_path = "/content/drive/MyDrive/SDXL/Output-Complex/Trump3.png"
image.save(image_path)
print("Image saved at:", image_path)
image = pipeline("A high quality picture of Trump showing the thumbs up as The Statue of Liberty", num_inference_steps=50).images[0]
image_path = "/content/drive/MyDrive/SDXL/Output-Complex/Trump4.png"
image.save(image_path)
print("Image saved at:", image_path)
image = pipeline("A high quality picture of Trump showing thumbs up in a lake with a laptop", num_inference_steps=50).images[0]
image_path = "/content/drive/MyDrive/SDXL/Output-Complex/Trump5.png"
image.save(image_path)
print("Image saved at:", image_path)
"""## Push to Hugging Face Hub"""
from huggingface_hub import HfApi
api = HfApi()
username = "Paresh1879"
repo_name = "stable-diffusion-xl-thumbsup-extend"
# Create the target repo first if it does not already exist, then upload the folder.
api.create_repo(repo_id=f"{username}/{repo_name}", exist_ok=True)
api.upload_folder(repo_id=f"{username}/{repo_name}", folder_path="/content/drive/MyDrive/SDXL")
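# Optionally also upload just the trained LoRA weights file, so the repo works
# directly with load_lora_weights(). The local path is the one loaded earlier
# in this notebook.
api.upload_file(
    path_or_fileobj="/content/diffusers/examples/dreambooth/pytorch_lora_weights.safetensors",
    path_in_repo="pytorch_lora_weights.safetensors",
    repo_id=f"{username}/{repo_name}",
)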