Bernard Maltais committed on
Commit
1e41f0e
2 Parent(s): 4c341a4 bee8d36

Merge branch 'master'

Browse files
Files changed (7) hide show
  1. .gitignore +3 -0
  2. 1.png +0 -0
  3. 2.png +0 -0
  4. 3.png +0 -0
  5. README.md +28 -0
  6. by_gauzy_storms.ckpt +3 -0
  7. kohya_ss_diffusers_fine_tuning.ps1 +81 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ raw
2
+ fine_tune*
3
+ *.json
1.png ADDED
2.png ADDED
3.png ADDED
README.md ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: unknown
3
+ ---
4
+ # Gauzy Storm finetuned style Model
5
+
6
+ Produced from publicly available pictures in landscape, portrait and square format. This model is focussed on creating animal hybrid artwork.
7
+
8
+ ## Model info
9
+
10
+ The model included was trained on "multi-resolution" images.
11
+
12
+ ## Using the model
13
+
14
+ * common subject prompt tokens: `<whatever> by gauzy storms`
15
+
16
+ ## Example prompts
17
+
18
+ `bear deer by gauzy storms`:
19
+
20
+ <img src="https://huggingface.co/cyburn/gauzy_storms/resolve/main/1.png" alt="Picture." width="500"/>
21
+
22
+ `pinguin by gauzy storms`:
23
+
24
+ <img src="https://huggingface.co/cyburn/gauzy_storms/resolve/main/2.png" alt="Picture." width="500"/>
25
+
26
+ `unicorn zebra by gauzy storms`:
27
+
28
+ <img src="https://huggingface.co/cyburn/gauzy_storms/resolve/main/3.png" alt="Picture." width="500"/>
by_gauzy_storms.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f276a11f82a9c1333c5493b1af11428f0bff14e88ef548436c6e6982ffb170d
3
+ size 2132856686
kohya_ss_diffusers_fine_tuning.ps1 ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Fine-tuning configuration for the "by gauzy storms" style model
# (kohya_ss diffusers fine tuning).
# NOTE: the previous header said "Sylvia Ritter. AKA: by silvery trait" —
# a stale comment copied from another model's script; this run trains
# "by gauzy storms" (see $caption and $train_dir below).

# variable values
$pretrained_model_name_or_path = "D:\models\v1-5-pruned-mse-vae.ckpt"  # base SD 1.5 checkpoint
$train_dir = "D:\models\train_gauzy_storms"                            # root of this training run
$training_folder = "raw"                                               # subfolder holding the images

$learning_rate = 1e-6
$dataset_repeats = 40          # how many times each image is repeated per epoch
$train_batch_size = 8
$epoch = 4
$save_every_n_epochs = 1
$mixed_precision = "bf16"      # precision used by the training pass below
$num_cpu_threads_per_process = 6

# bucketed training resolution ("width,height")
$max_resolution = "576,576"
# You should not have to change values past this point

# stop script on error
$ErrorActionPreference = "Stop"

# activate venv
cd d:\kohya_ss\
.\venv\Scripts\activate

# Useful to create a base caption that will be augmented on a per-image basis
$caption = "by gauzy storms"

# Write one "<image basename>.txt" caption file (containing $caption) next to
# every training image; -Force overwrites caption files from earlier runs.
$files = Get-ChildItem $train_dir\$training_folder\"*.*" -Include *.png,*.jpg,*.webp
foreach ($file in $files) {
  New-Item -ItemType file -Path $train_dir\$training_folder -Name "$($file.BaseName).txt" -Value $caption -Force
}
# create caption json file: merge the per-image .txt captions into one
# metadata file. ("--caption_extention" is the tool's own spelling of its flag.)
python D:\kohya_ss\diffusers_fine_tuning\merge_captions_to_metadata-ber.py `
    --caption_extention ".txt" "$train_dir\$training_folder" "$train_dir\meta_cap.json"

# create images buckets and cache the VAE latents for every image.
# NOTE(review): latents are prepared with fp16 while training below uses
# $mixed_precision ("bf16") — confirm the mismatch is intentional.
python D:\kohya_ss\diffusers_fine_tuning\prepare_buckets_latents-ber.py `
    "$train_dir\$training_folder" `
    "$train_dir\meta_cap.json" `
    "$train_dir\meta_lat.json" `
    $pretrained_model_name_or_path `
    --batch_size 4 --max_resolution $max_resolution --mixed_precision fp16
# Get number of valid images by counting the cached latent files (*.npz)
# written by the bucketing step — one per usable training image.
$image_num = (Get-ChildItem "$train_dir\$training_folder" -Recurse -File -Include *.npz | Measure-Object).Count
$repeats = $image_num * $dataset_repeats

# calculate max_train_set: total optimizer steps for the whole run
# (image repeats per epoch / batch size, times number of epochs).
$max_train_set = [Math]::Ceiling($repeats / $train_batch_size * $epoch)

# main fine-tuning pass
accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process D:\kohya_ss\diffusers_fine_tuning\fine_tune.py `
    --pretrained_model_name_or_path=$pretrained_model_name_or_path `
    --in_json "$train_dir\meta_lat.json" `
    --train_data_dir="$train_dir\$training_folder" `
    --output_dir="$train_dir\fine_tuned2" `
    --train_batch_size=$train_batch_size `
    --dataset_repeats=$dataset_repeats `
    --learning_rate=$learning_rate `
    --max_train_steps=$max_train_set `
    --use_8bit_adam --xformers `
    --mixed_precision=$mixed_precision `
    --save_every_n_epochs=$save_every_n_epochs `
    --train_text_encoder `
    --save_precision="fp16"

# Optional second pass (disabled): resume from the first run's last.ckpt
# with half the repeats and half the steps.
# accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process D:\kohya_ss\diffusers_fine_tuning\fine_tune_v1-ber.py `
#     --pretrained_model_name_or_path="$train_dir\fine_tuned\last.ckpt" `
#     --in_json "$train_dir\meta_lat.json" `
#     --train_data_dir="$train_dir\$training_folder" `
#     --output_dir="$train_dir\fine_tuned2" `
#     --train_batch_size=$train_batch_size `
#     --dataset_repeats=$([Math]::Ceiling($dataset_repeats / 2)) `
#     --learning_rate=$learning_rate `
#     --max_train_steps=$([Math]::Ceiling($max_train_set / 2)) `
#     --use_8bit_adam --xformers `
#     --mixed_precision=$mixed_precision `
#     --save_every_n_epochs=$save_every_n_epochs `
#     --save_half