jordandavis committed
Commit
7903343
Parent: 1745a69

End of training

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50)
  1. .gitattributes +5 -0
  2. README.md +43 -0
  3. checkpoint-250/custom_checkpoint_0.pkl +3 -0
  4. checkpoint-250/optimizer.bin +3 -0
  5. checkpoint-250/pytorch_model.bin +3 -0
  6. checkpoint-250/random_states_0.pkl +3 -0
  7. checkpoint-250/scaler.pt +3 -0
  8. checkpoint-250/scheduler.bin +3 -0
  9. feature_extractor/preprocessor_config.json +28 -0
  10. image_0.png +0 -0
  11. image_1.png +0 -0
  12. image_10.png +0 -0
  13. image_11.png +0 -0
  14. image_12.png +0 -0
  15. image_13.png +0 -0
  16. image_14.png +0 -0
  17. image_15.png +0 -0
  18. image_16.png +0 -0
  19. image_17.png +0 -0
  20. image_18.png +0 -0
  21. image_19.png +0 -0
  22. image_2.png +0 -0
  23. image_3.png +0 -0
  24. image_4.png +0 -0
  25. image_5.png +0 -0
  26. image_6.png +0 -0
  27. image_7.png +0 -0
  28. image_8.png +0 -0
  29. image_9.png +0 -0
  30. model_index.json +34 -0
  31. safety_checker/config.json +181 -0
  32. safety_checker/model.safetensors +3 -0
  33. samples/0009.png +3 -0
  34. samples/0019.png +3 -0
  35. samples/0029.png +3 -0
  36. samples/0039.png +3 -0
  37. samples/0049.png +3 -0
  38. samples/bulbomembranous-undeceptive-581/image_0_epoch_19.jpg +0 -0
  39. samples/bulbomembranous-undeceptive-581/image_0_epoch_29.jpg +0 -0
  40. samples/bulbomembranous-undeceptive-581/image_0_epoch_39.jpg +0 -0
  41. samples/bulbomembranous-undeceptive-581/image_0_epoch_49.jpg +0 -0
  42. samples/bulbomembranous-undeceptive-581/image_0_epoch_9.jpg +0 -0
  43. samples/bulbomembranous-undeceptive-581/image_1_epoch_19.jpg +0 -0
  44. samples/bulbomembranous-undeceptive-581/image_1_epoch_29.jpg +0 -0
  45. samples/bulbomembranous-undeceptive-581/image_1_epoch_39.jpg +0 -0
  46. samples/bulbomembranous-undeceptive-581/image_1_epoch_49.jpg +0 -0
  47. samples/bulbomembranous-undeceptive-581/image_1_epoch_9.jpg +0 -0
  48. samples/bulbomembranous-undeceptive-581/image_2_epoch_19.jpg +0 -0
  49. samples/bulbomembranous-undeceptive-581/image_2_epoch_29.jpg +0 -0
  50. samples/bulbomembranous-undeceptive-581/image_2_epoch_39.jpg +0 -0
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ samples/0009.png filter=lfs diff=lfs merge=lfs -text
+ samples/0019.png filter=lfs diff=lfs merge=lfs -text
+ samples/0029.png filter=lfs diff=lfs merge=lfs -text
+ samples/0039.png filter=lfs diff=lfs merge=lfs -text
+ samples/0049.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,43 @@
+
+ ---
+ license: openrail++
+ base_model: runwayml/stable-diffusion-inpainting
+ instance_prompt: Lucia Wing Chair - Metal Legs
+ tags:
+ - stable-diffusion-xl
+ - stable-diffusion-xl-diffusers
+ - text-to-image
+ - diffusers
+ - lora
+ inference: true
+ ---
+
+ # LoRA DreamBooth - jordandavis/lucia_wing_chair
+
+ These are LoRA adaptation weights for runwayml/stable-diffusion-inpainting. The weights were trained on Lucia Wing Chair - Metal Legs using [DreamBooth](https://dreambooth.github.io/). Some example images are shown below.
+
+ ![img_0](./image_0.png)
+ ![img_1](./image_1.png)
+ ![img_2](./image_2.png)
+ ![img_3](./image_3.png)
+ ![img_4](./image_4.png)
+ ![img_5](./image_5.png)
+ ![img_6](./image_6.png)
+ ![img_7](./image_7.png)
+ ![img_8](./image_8.png)
+ ![img_9](./image_9.png)
+ ![img_10](./image_10.png)
+ ![img_11](./image_11.png)
+ ![img_12](./image_12.png)
+ ![img_13](./image_13.png)
+ ![img_14](./image_14.png)
+ ![img_15](./image_15.png)
+ ![img_16](./image_16.png)
+ ![img_17](./image_17.png)
+ ![img_18](./image_18.png)
+ ![img_19](./image_19.png)
+
+
+ LoRA for the text encoder was enabled: False.
+
+ Special VAE used for training: None.
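A usage note on the card above: below is a minimal sketch of loading these adapter weights on top of the base inpainting pipeline with diffusers. The repo id and prompt come from the card itself; `load_lora_weights` as the loading entry point is an assumption that holds for recent diffusers releases, including the 0.21.4 recorded in model_index.json further down.

```python
# Minimal sketch, assuming diffusers >= 0.17 (this repo records 0.21.4).
import torch
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
).to("cuda")

# Apply the LoRA adapter weights from this repo on top of the base UNet.
pipe.load_lora_weights("jordandavis/lucia_wing_chair")

# `init_image` and `mask` are hypothetical PIL images supplied by the
# caller; the prompt is the instance prompt the adapter was trained on.
# result = pipe(prompt="Lucia Wing Chair - Metal Legs",
#               image=init_image, mask_image=mask).images[0]
```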
checkpoint-250/custom_checkpoint_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ab0e40d496070a76d35e2146067639ba0d2bbda96a739f23e6dbd39a2b4dcc0
+ size 1040
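Each checkpoint binary in this commit is stored as a three-line Git LFS pointer like the one above (spec version, blob sha256, size in bytes) rather than as the blob itself. A small self-contained sketch of parsing that format, using the pointer contents shown here:

```python
# Parse the three-line Git LFS pointer format: each line is
# "<key> <value>", and the oid value carries a "sha256:" prefix.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.partition(" ")[::2] for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),  # Python 3.9+
        "size": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:1ab0e40d496070a76d35e2146067639ba0d2bbda96a739f23e6dbd39a2b4dcc0
size 1040"""
print(parse_lfs_pointer(pointer))  # size == 1040, matching the diff above
```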
checkpoint-250/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:149efa67aca470c821cbfc1697f55dec165baa0d1def281b7daadfd41e011ba9
+ size 1725138554
checkpoint-250/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35eaf9f2db074fac3fa61de00ddd837834fd53731c54dd411cde2daa40b2e64e
+ size 3438427074
checkpoint-250/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88e91264403a34fe2da4c6d9eb8abb7d94d7fd2476ae8bc873721e1bffb89e11
+ size 14344
checkpoint-250/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccec82ecb1db9fee8431a9924525ac42c65eba373a478c5528e653b68744a6a9
+ size 988
checkpoint-250/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:671d7cdad6271b0e08d32ec8a2aef8dfcbceb770590b34b1f7cb1d58335266bb
+ size 1000
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "feature_extractor_type": "CLIPFeatureExtractor",
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPFeatureExtractor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
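For reference, this config encodes the standard CLIP preprocessing: resize the shortest edge to 224 (resample 3 is bicubic), center-crop to 224x224, rescale pixel values by 1/255 (the 0.00392... factor), then normalize per channel with the CLIP mean/std. Transformers' CLIPFeatureExtractor consumes this file directly; a hand-rolled sketch of the same transform:

```python
# Hand-rolled equivalent of the CLIP preprocessing in this config.
import numpy as np
from PIL import Image

MEAN = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
STD = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)

def preprocess(img: Image.Image, size: int = 224) -> np.ndarray:
    # Resize so the shortest edge equals `size` (resample=3 -> bicubic).
    w, h = img.size
    scale = size / min(w, h)
    img = img.resize((round(w * scale), round(h * scale)), Image.BICUBIC)
    # Center-crop to size x size.
    w, h = img.size
    left, top = (w - size) // 2, (h - size) // 2
    img = img.crop((left, top, left + size, top + size))
    # Rescale to [0, 1], then normalize channel-wise; result is HWC float32.
    arr = np.asarray(img.convert("RGB"), dtype=np.float32) / 255.0
    return (arr - MEAN) / STD
```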
image_0.png ADDED
image_1.png ADDED
image_10.png ADDED
image_11.png ADDED
image_12.png ADDED
image_13.png ADDED
image_14.png ADDED
image_15.png ADDED
image_16.png ADDED
image_17.png ADDED
image_18.png ADDED
image_19.png ADDED
image_2.png ADDED
image_3.png ADDED
image_4.png ADDED
image_5.png ADDED
image_6.png ADDED
image_7.png ADDED
image_8.png ADDED
image_9.png ADDED
model_index.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_class_name": "StableDiffusionInpaintPipeline",
+   "_diffusers_version": "0.21.4",
+   "_name_or_path": "runwayml/stable-diffusion-inpainting",
+   "feature_extractor": [
+     "transformers",
+     "CLIPFeatureExtractor"
+   ],
+   "requires_safety_checker": true,
+   "safety_checker": [
+     "stable_diffusion",
+     "StableDiffusionSafetyChecker"
+   ],
+   "scheduler": [
+     "diffusers",
+     "DDIMScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
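model_index.json is the manifest DiffusionPipeline.from_pretrained reads to decide which class to instantiate for each component subfolder. A brief sketch, assuming the repo id from the README above:

```python
# Loading the pipeline resolves _class_name and each component's
# (library, class) pair from model_index.json.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("jordandavis/lucia_wing_chair")
print(type(pipe).__name__)             # StableDiffusionInpaintPipeline
print(type(pipe.scheduler).__name__)   # DDIMScheduler
```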
safety_checker/config.json ADDED
@@ -0,0 +1,181 @@
+ {
+   "_commit_hash": "afeee10def38be19995784bcc811882409d066e5",
+   "_name_or_path": "/home/jordan/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/afeee10def38be19995784bcc811882409d066e5/safety_checker",
+   "architectures": [
+     "StableDiffusionSafetyChecker"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clip",
+   "projection_dim": 768,
+   "text_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 77,
+     "min_length": 0,
+     "model_type": "clip_text_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "prefix": null,
+     "problem_type": null,
+     "projection_dim": 512,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.26.1",
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "vocab_size": 49408
+   },
+   "text_config_dict": {
+     "hidden_size": 768,
+     "intermediate_size": 3072,
+     "num_attention_heads": 12,
+     "num_hidden_layers": 12
+   },
+   "torch_dtype": "float16",
+   "transformers_version": null,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "quick_gelu",
+     "hidden_size": 1024,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "clip_vision_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 24,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "projection_dim": 512,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.26.1",
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "vision_config_dict": {
+     "hidden_size": 1024,
+     "intermediate_size": 4096,
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "patch_size": 14
+   }
+ }
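The config above parametrizes the CLIP-based safety checker (a 24-layer, patch-14 vision tower per vision_config) that the pipeline runs over generated images, blanking any it flags. A minimal sketch of calling it directly; the forward signature has shifted slightly across diffusers versions, so treat the call as illustrative:

```python
# Illustrative only: run the safety checker on a placeholder image.
import numpy as np
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)

repo = "jordandavis/lucia_wing_chair"
checker = StableDiffusionSafetyChecker.from_pretrained(repo, subfolder="safety_checker")
extractor = CLIPFeatureExtractor.from_pretrained(repo, subfolder="feature_extractor")

images = [np.zeros((512, 512, 3), dtype=np.uint8)]  # placeholder "generation"
clip_input = extractor(images, return_tensors="pt").pixel_values
checked, has_nsfw = checker(images=images, clip_input=clip_input)
print(has_nsfw)  # e.g. [False]; flagged entries in `checked` are zeroed out
```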
safety_checker/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08902f19b1cfebd7c989f152fc0507bef6898c706a91d666509383122324b511
+ size 608018440
samples/0009.png ADDED

Git LFS Details

  • SHA256: cfe2c1f725aa4c5966b3aa93106f22a549383a3dc2cccecc0accdf841bc8a1a1
  • Pointer size: 132 Bytes
  • Size of remote file: 1.51 MB
samples/0019.png ADDED

Git LFS Details

  • SHA256: 163d7dddb0c3664e335cb3929e79a5a4bc6e00558f94dd067bd92d1f544a1ca9
  • Pointer size: 132 Bytes
  • Size of remote file: 1.45 MB
samples/0029.png ADDED

Git LFS Details

  • SHA256: 1189c71f8b20f7567cfd71c63566e34ce187b4163df6e7e5bb48f3fa35ed1974
  • Pointer size: 132 Bytes
  • Size of remote file: 1.51 MB
samples/0039.png ADDED

Git LFS Details

  • SHA256: 4e7e19b0c78453e6e53d2b37ba61618e7f6899fa5d0e2a77a8986927060307ee
  • Pointer size: 132 Bytes
  • Size of remote file: 1.43 MB
samples/0049.png ADDED

Git LFS Details

  • SHA256: 44f180e95c508fd16ea974bcdaff813cb06c2f717d48730b41542d4646c3efa5
  • Pointer size: 132 Bytes
  • Size of remote file: 1.42 MB
samples/bulbomembranous-undeceptive-581/image_0_epoch_19.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_0_epoch_29.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_0_epoch_39.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_0_epoch_49.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_0_epoch_9.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_1_epoch_19.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_1_epoch_29.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_1_epoch_39.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_1_epoch_49.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_1_epoch_9.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_2_epoch_19.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_2_epoch_29.jpg ADDED
samples/bulbomembranous-undeceptive-581/image_2_epoch_39.jpg ADDED