Bernard Maltais committed on
Commit
0d5709d
1 Parent(s): fc1dc37

1st commit

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. 1.jpg +0 -0
  3. 2.jpg +0 -0
  4. README.md +25 -0
  5. kohya_diffusers_train.ps1 +80 -0
  6. lego_set.ckpt +3 -0
.gitattributes CHANGED
@@ -31,4 +31,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
31
  *.xz filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
 
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
31
  *.xz filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
1.jpg ADDED
2.jpg ADDED
README.md ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: unknown
3
+ ---
4
+ # Lego Set finetuned style Model
5
+
6
+ Produced from publicly available pictures in landscape, portrait and square format.
7
+
8
+ ## Model info
9
+
10
+ The model included was trained on "multi-resolution" images of "Lego Sets"
11
+
12
+ ## Using the model
13
+
14
+ * common subject prompt tokens: `<whatever> lego set`
15
+
16
+ ## Example prompts
17
+
18
+ `mcdonald restaurant AND lego set`:
19
+
20
+ <img src="https://huggingface.co/cyburn/lego_set/resolve/main/1.jpg" alt="Picture." width="500"/>
21
+
22
+ `a digital drawing of crow, skull, animal, symmetry, flower AND lego set`:
23
+
24
+ <img src="https://huggingface.co/cyburn/lego_set/resolve/main/2.jpg" alt="Picture." width="500"/>
25
+
kohya_diffusers_train.ps1 ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Fine-tune a Stable Diffusion checkpoint on "lego set" images using the
# kohya_ss diffusers fine-tuning scripts: caption generation -> metadata merge
# -> latent bucketing -> accelerate training launch.
# NOTE(review): the original header comment read "Sylvia Ritter. AKA: by
# silvery trait" — apparently a leftover from a different training run.

# --- User-tunable values ----------------------------------------------------
$pretrained_model_name_or_path = "D:\models\v1-5-pruned-mse-vae.ckpt"  # base model
$train_dir = "D:\dreambooth\train_lego"    # working directory for this run
$training_folder = "raw"                   # subfolder holding the training images

$learning_rate = 5e-6
$dataset_repeats = 40                      # times each image is repeated per epoch
$train_batch_size = 8
$epoch = 4
$save_every_n_epochs = 1
$mixed_precision = "bf16"
$num_cpu_threads_per_process = 6

$max_resolution = "512,512"

# You should not have to change values past this point

# Stop the script on the first error.
$ErrorActionPreference = "Stop"

# Activate the Python virtual environment.
.\venv\Scripts\activate

# Useful to create a base caption that will be augmented on a per-image basis.
$caption = "lego set"

# Write one caption .txt per image. -Force overwrites stale caption files so
# the script can be re-run: previously, an existing .txt made New-Item throw
# and abort the whole run because $ErrorActionPreference is "Stop".
$files = Get-ChildItem $train_dir\$training_folder\"*.*" -Include *.png,*.jpg,*.webp
foreach ($file in $files) {
  New-Item -Force -ItemType file -Path $train_dir\$training_folder -Name "$($file.BaseName).txt" -Value $caption
}

# Merge the per-image captions into a single metadata json file.
# ("--caption_extention" is the external tool's own flag spelling — keep it.)
python D:\kohya_ss\diffusers_fine_tuning\merge_captions_to_metadata-ber.py `
  --caption_extention ".txt" $train_dir"\"$training_folder $train_dir"\meta_cap.json"

# Bucket the images by resolution and pre-compute VAE latents (.npz files).
# NOTE(review): this step hardcodes fp16 while training below uses
# $mixed_precision (bf16) — confirm the mismatch is intentional.
python D:\kohya_ss\diffusers_fine_tuning\prepare_buckets_latents-ber.py `
  $train_dir"\"$training_folder `
  $train_dir"\meta_cap.json" `
  $train_dir"\meta_lat.json" `
  $pretrained_model_name_or_path `
  --batch_size 4 --max_resolution $max_resolution --mixed_precision fp16

# Number of valid images = number of .npz latent files produced above.
$image_num = Get-ChildItem "$train_dir\$training_folder" -Recurse -File -Include *.npz | Measure-Object | %{$_.Count}
$repeats = $image_num * $dataset_repeats

# Total optimizer steps: (repeats per epoch / batch size) * epochs, rounded up.
$max_train_set = [Math]::Ceiling($repeats / $train_batch_size * $epoch)

# Launch the fine-tuning run.
accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process D:\kohya_ss\diffusers_fine_tuning\fine_tune.py `
  --pretrained_model_name_or_path=$pretrained_model_name_or_path `
  --in_json $train_dir"\meta_lat.json" `
  --train_data_dir=$train_dir"\"$training_folder `
  --output_dir=$train_dir"\fine_tuned2" `
  --train_batch_size=$train_batch_size `
  --dataset_repeats=$dataset_repeats `
  --learning_rate=$learning_rate `
  --max_train_steps=$max_train_set `
  --use_8bit_adam --xformers `
  --mixed_precision=$mixed_precision `
  --save_every_n_epochs=$save_every_n_epochs `
  --train_text_encoder `
  --save_precision="fp16"

# Optional second pass resuming from the first run's checkpoint (disabled):
# accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process D:\kohya_ss\diffusers_fine_tuning\fine_tune_v1-ber.py `
#   --pretrained_model_name_or_path=$train_dir"\fine_tuned\last.ckpt" `
#   --in_json $train_dir"\meta_lat.json" `
#   --train_data_dir=$train_dir"\"$training_folder `
#   --output_dir=$train_dir"\fine_tuned2" `
#   --train_batch_size=$train_batch_size `
#   --dataset_repeats=$([Math]::Ceiling($dataset_repeats / 2)) `
#   --learning_rate=$learning_rate `
#   --max_train_steps=$([Math]::Ceiling($max_train_set / 2)) `
#   --use_8bit_adam --xformers `
#   --mixed_precision=$mixed_precision `
#   --save_every_n_epochs=$save_every_n_epochs `
#   --save_half
lego_set.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3dfb8d1ed4e4c6f2b4ddc9ae197f8d7ae0114d2f47ae5b7c6be45d384d32513
3
+ size 2132856686