iriscope committed
Commit 78a2114
1 Parent(s): 0b792f8

Upload with huggingface_hub

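The commit message "Upload with huggingface_hub" is the default one produced by the `huggingface_hub` upload helpers. A minimal sketch of how a local DreamBooth output folder like this one can be pushed in a single commit; the repo id and local path below are placeholders, not taken from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()
# Create the target repo if it does not exist yet (placeholder repo id).
api.create_repo("iriscope/dreambooth-output", exist_ok=True)
# Upload every file in the local training output folder in one commit;
# large binaries (*.bin, tfevents) are stored through Git LFS on the Hub.
api.upload_folder(
    folder_path="./dreambooth-output",      # placeholder local path
    repo_id="iriscope/dreambooth-output",   # placeholder repo id
    commit_message="Upload with huggingface_hub",
)
```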
logs/dreambooth/1681476308.1182032/events.out.tfevents.1681476308.s-iriscope-dreambooth-training-c7fc8-69459f4fbf-5cn5c.1.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17b434a58b58155ab5ceb9defa178db7e8405d90f458792bb31e4e055edfd58d
+ size 2476
logs/dreambooth/events.out.tfevents.1681476308.s-iriscope-dreambooth-training-c7fc8-69459f4fbf-5cn5c.1.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4055bf852e91d8e742a270446efcb70c572096451a2ea6d84fb6bf7f9cbaf98f
+ size 957434
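The two added files are TensorBoard event logs written during the DreamBooth run; the repository stores only their Git LFS pointers (sha256 and size). A minimal sketch for reading the logged scalars locally, assuming the files have been downloaded and the `tensorboard` package is installed:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at the downloaded log directory (hypothetical local path).
acc = EventAccumulator("logs/dreambooth")
acc.Reload()

# List the scalar tags that were logged, then print one series.
tags = acc.Tags()["scalars"]
print(tags)
for event in acc.Scalars(tags[0]):
    print(event.step, event.value)
```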
model_index.json CHANGED
@@ -1,6 +1,6 @@
  {
  "_class_name": "StableDiffusionPipeline",
- "_diffusers_version": "0.9.0.dev0",
+ "_diffusers_version": "0.6.0",
  "feature_extractor": [
  "transformers",
  "CLIPFeatureExtractor"
safety_checker/config.json CHANGED
@@ -1,6 +1,5 @@
  {
- "_commit_hash": "4bb648a606ef040e7685bde262611766a5fdd67b",
- "_name_or_path": "CompVis/stable-diffusion-safety-checker",
+ "_name_or_path": "./safety_module",
  "architectures": [
  "StableDiffusionSafetyChecker"
  ],
@@ -69,7 +68,6 @@
  "sep_token_id": null,
  "task_specific_params": null,
  "temperature": 1.0,
- "tf_legacy_loss": false,
  "tie_encoder_decoder": false,
  "tie_word_embeddings": true,
  "tokenizer_class": null,
@@ -77,7 +75,7 @@
  "top_p": 1.0,
  "torch_dtype": null,
  "torchscript": false,
- "transformers_version": "4.22.0.dev0",
+ "transformers_version": "4.21.0.dev0",
  "typical_p": 1.0,
  "use_bfloat16": false,
  "vocab_size": 49408
@@ -135,7 +133,6 @@
  "num_attention_heads": 16,
  "num_beam_groups": 1,
  "num_beams": 1,
- "num_channels": 3,
  "num_hidden_layers": 24,
  "num_return_sequences": 1,
  "output_attentions": false,
@@ -153,7 +150,6 @@
  "sep_token_id": null,
  "task_specific_params": null,
  "temperature": 1.0,
- "tf_legacy_loss": false,
  "tie_encoder_decoder": false,
  "tie_word_embeddings": true,
  "tokenizer_class": null,
@@ -161,7 +157,7 @@
  "top_p": 1.0,
  "torch_dtype": null,
  "torchscript": false,
- "transformers_version": "4.22.0.dev0",
+ "transformers_version": "4.21.0.dev0",
  "typical_p": 1.0,
  "use_bfloat16": false
  },
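The safety checker config now points at a local `./safety_module` copy instead of `CompVis/stable-diffusion-safety-checker`. If the checker is not wanted at inference time, diffusers also allows dropping it when loading; a common pattern, sketched here with the same placeholder repo id:

```python
from diffusers import StableDiffusionPipeline

# Load the pipeline without the safety checker component (placeholder repo id).
pipe = StableDiffusionPipeline.from_pretrained(
    "iriscope/dreambooth-model",
    safety_checker=None,
    requires_safety_checker=False,
)
```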
scheduler/scheduler_config.json CHANGED
@@ -1,10 +1,9 @@
  {
- "_class_name": "DDIMScheduler",
- "_diffusers_version": "0.9.0.dev0",
+ "_class_name": "PNDMScheduler",
+ "_diffusers_version": "0.11.1",
  "beta_end": 0.012,
  "beta_schedule": "scaled_linear",
  "beta_start": 0.00085,
- "clip_sample": false,
  "num_train_timesteps": 1000,
  "prediction_type": "epsilon",
  "set_alpha_to_one": false,
text_encoder/config.json CHANGED
@@ -1,4 +1,5 @@
  {
+ "_name_or_path": "/home/user/.cache/huggingface/hub/models--multimodalart--sd-fine-tunable/snapshots/9dabd4dbbdd4c72e2ffbc8fb4e28debef0254949",
  "architectures": [
  "CLIPTextModel"
  ],
@@ -6,19 +7,19 @@
  "bos_token_id": 0,
  "dropout": 0.0,
  "eos_token_id": 2,
- "hidden_act": "gelu",
- "hidden_size": 1024,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
- "intermediate_size": 4096,
+ "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 77,
  "model_type": "clip_text_model",
- "num_attention_heads": 16,
- "num_hidden_layers": 23,
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
  "pad_token_id": 1,
- "projection_dim": 512,
+ "projection_dim": 768,
  "torch_dtype": "float32",
- "transformers_version": "4.21.1",
+ "transformers_version": "4.25.1",
  "vocab_size": 49408
  }
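The new text encoder config matches the Stable Diffusion 1.x CLIP ViT-L/14 encoder (hidden size 768, 12 layers, `quick_gelu`), replacing the 1024-wide, 23-layer OpenCLIP-style config of the SD 2.1 base that the old tokenizer config pointed at. A quick sanity check with transformers, using the same placeholder repo id:

```python
from transformers import CLIPTextModel

# Placeholder repo id; load only the text_encoder subfolder of the pipeline repo.
text_encoder = CLIPTextModel.from_pretrained(
    "iriscope/dreambooth-model", subfolder="text_encoder"
)
print(text_encoder.config.hidden_size)        # expected: 768
print(text_encoder.config.num_hidden_layers)  # expected: 12
```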
text_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f0b6635b44bbfc50a61f74468ea18314e7bf3387db059390df4f30811b053849
- size 1361674657
+ oid sha256:5f9a1e23fb9cc35a64fd89783b7a0bc89a9f399dcd05a1f12a7d5cc74738e5b9
+ size 492308087
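Only the LFS pointer appears in the diff; the sha256 and size identify the actual weight file. After downloading, the pointer can be checked against the real file, as a sketch (hypothetical local path):

```python
import hashlib
from pathlib import Path

# Hypothetical local path to the downloaded weight file.
path = Path("text_encoder/pytorch_model.bin")
sha = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print(sha.hexdigest())       # should match the oid in the LFS pointer
print(path.stat().st_size)   # should match the pointer's size field
```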
tokenizer/special_tokens_map.json CHANGED
@@ -13,7 +13,7 @@
  "rstrip": false,
  "single_word": false
  },
- "pad_token": "!",
+ "pad_token": "<|endoftext|>",
  "unk_token": {
  "content": "<|endoftext|>",
  "lstrip": false,
tokenizer/tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
  },
  "errors": "replace",
  "model_max_length": 77,
- "name_or_path": "stabilityai/stable-diffusion-2-1-base",
+ "name_or_path": "/home/user/.cache/huggingface/hub/models--multimodalart--sd-fine-tunable/snapshots/9dabd4dbbdd4c72e2ffbc8fb4e28debef0254949/tokenizer",
  "pad_token": "<|endoftext|>",
  "special_tokens_map_file": "./special_tokens_map.json",
  "tokenizer_class": "CLIPTokenizer",
unet/config.json CHANGED
@@ -1,13 +1,9 @@
  {
  "_class_name": "UNet2DConditionModel",
- "_diffusers_version": "0.9.0.dev0",
+ "_diffusers_version": "0.11.1",
+ "_name_or_path": "/home/user/.cache/huggingface/hub/models--multimodalart--sd-fine-tunable/snapshots/9dabd4dbbdd4c72e2ffbc8fb4e28debef0254949",
  "act_fn": "silu",
- "attention_head_dim": [
-   5,
-   10,
-   20,
-   20
- ],
+ "attention_head_dim": 8,
  "block_out_channels": [
  320,
  640,
@@ -15,7 +11,8 @@
  1280
  ],
  "center_input_sample": false,
- "cross_attention_dim": 1024,
+ "class_embed_type": null,
+ "cross_attention_dim": 768,
  "down_block_types": [
  "CrossAttnDownBlock2D",
  "CrossAttnDownBlock2D",
@@ -29,17 +26,20 @@
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_scale_factor": 1,
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "out_channels": 4,
- "sample_size": 32,
+ "resnet_time_scale_shift": "default",
+ "sample_size": 64,
  "up_block_types": [
  "UpBlock2D",
  "CrossAttnUpBlock2D",
  "CrossAttnUpBlock2D",
  "CrossAttnUpBlock2D"
  ],
+ "upcast_attention": false,
  "use_linear_projection": false
  }
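The UNet config also moves to SD 1.x geometry: `cross_attention_dim` of 768 (matching the new text encoder hidden size), a scalar `attention_head_dim` of 8, and `sample_size` 64. A sketch that loads just the UNet and checks the conditioning width, with the placeholder repo id:

```python
from diffusers import UNet2DConditionModel

# Placeholder repo id; load only the unet subfolder.
unet = UNet2DConditionModel.from_pretrained(
    "iriscope/dreambooth-model", subfolder="unet"
)
print(unet.config.cross_attention_dim)  # expected: 768, matches the text encoder
print(unet.config.sample_size)          # expected: 64 (latent resolution for 512 px images)
```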
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8fa89c9915435c4ca57fa131cd965923eb856c5f2d9b2f4d0b30009006a3dbe7
- size 3463925413
+ oid sha256:57b51f2826055124fb4bbc709eda825e77d6fed2748eeb65954c506dd818f56b
+ size 3438364325
vae/config.json CHANGED
@@ -1,6 +1,7 @@
  {
  "_class_name": "AutoencoderKL",
- "_diffusers_version": "0.9.0.dev0",
+ "_diffusers_version": "0.11.1",
+ "_name_or_path": "/home/user/.cache/huggingface/hub/models--multimodalart--sd-fine-tunable/snapshots/9dabd4dbbdd4c72e2ffbc8fb4e28debef0254949/vae",
  "act_fn": "silu",
  "block_out_channels": [
  128,
@@ -19,7 +20,7 @@
  "layers_per_block": 2,
  "norm_num_groups": 32,
  "out_channels": 3,
- "sample_size": 256,
+ "sample_size": 512,
  "up_block_types": [
  "UpDecoderBlock2D",
  "UpDecoderBlock2D",