End of training
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- checkpoint-45000/optimizer.bin +3 -0
- checkpoint-45000/random_states_0.pkl +3 -0
- checkpoint-45000/scheduler.bin +3 -0
- checkpoint-45000/unet/config.json +73 -0
- checkpoint-45000/unet/diffusion_pytorch_model.safetensors +3 -0
- checkpoint-45000/unet_ema/config.json +80 -0
- checkpoint-45000/unet_ema/diffusion_pytorch_model.safetensors +3 -0
- eval/edited_image_epoch_0_output_0.png +0 -0
- eval/edited_image_epoch_0_output_1.png +0 -0
- eval/edited_image_epoch_0_output_2.png +0 -0
- eval/edited_image_epoch_0_output_3.png +0 -0
- eval/edited_image_epoch_1_output_0.png +0 -0
- eval/edited_image_epoch_1_output_1.png +0 -0
- eval/edited_image_epoch_1_output_2.png +0 -0
- eval/edited_image_epoch_1_output_3.png +0 -0
- eval/edited_image_epoch_2_output_0.png +0 -0
- eval/edited_image_epoch_2_output_1.png +0 -0
- eval/edited_image_epoch_2_output_2.png +0 -0
- eval/edited_image_epoch_2_output_3.png +0 -0
- eval/edited_image_epoch_3_output_0.png +0 -0
- eval/edited_image_epoch_3_output_1.png +0 -0
- eval/edited_image_epoch_3_output_2.png +0 -0
- eval/edited_image_epoch_3_output_3.png +0 -0
- eval/edited_image_epoch_4_output_0.png +0 -0
- eval/edited_image_epoch_4_output_1.png +0 -0
- eval/edited_image_epoch_4_output_2.png +0 -0
- eval/edited_image_epoch_4_output_3.png +0 -0
- eval/edited_image_epoch_5_output_0.png +0 -0
- eval/edited_image_epoch_5_output_1.png +0 -0
- eval/edited_image_epoch_5_output_2.png +0 -0
- eval/edited_image_epoch_5_output_3.png +0 -0
- eval/edited_image_epoch_6_output_0.png +0 -0
- eval/edited_image_epoch_6_output_1.png +0 -0
- eval/edited_image_epoch_6_output_2.png +0 -0
- eval/edited_image_epoch_6_output_3.png +0 -0
- eval/edited_image_epoch_7_output_0.png +0 -0
- eval/edited_image_epoch_7_output_1.png +0 -0
- eval/edited_image_epoch_7_output_2.png +0 -0
- eval/edited_image_epoch_7_output_3.png +0 -0
- eval/edited_image_epoch_8_output_0.png +0 -0
- eval/edited_image_epoch_8_output_1.png +0 -0
- eval/edited_image_epoch_8_output_2.png +0 -0
- eval/edited_image_epoch_8_output_3.png +0 -0
- eval/edited_image_epoch_9_output_0.png +0 -0
- eval/edited_image_epoch_9_output_1.png +0 -0
- eval/edited_image_epoch_9_output_2.png +0 -0
- eval/edited_image_epoch_9_output_3.png +0 -0
- eval/original_image.png +0 -0
- feature_extractor/preprocessor_config.json +44 -0
- model_index.json +38 -0
checkpoint-45000/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bacba97c3e7a79ec6661e298464705c5370dbf6421d70029bcf34550e5790c4b
+size 6927959764
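Note: the large binary files in this commit are stored as Git LFS pointers, so the diff shows only the pointer text (spec version, SHA-256 OID, and byte size), not the weights themselves. A minimal sketch of fetching one of these files programmatically, assuming a hypothetical repo id "user/instruct-pix2pix-sd21" (substitute the real one):

```python
# Minimal sketch: download an LFS-backed checkpoint file from the Hub.
# "user/instruct-pix2pix-sd21" is a hypothetical repo id.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="user/instruct-pix2pix-sd21",
    filename="checkpoint-45000/optimizer.bin",
)
print(local_path)  # resolved file on disk; LFS resolution is handled transparently
```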
checkpoint-45000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1d9c6d45dbc471cf5c49027b96695b6808c0e684cf8a1d74ccc3844533251dd
+size 14344
checkpoint-45000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51608f2d5f8e7225c3bf357787d03c0d7a58df130a414f4ed409483d382b0bc3
+size 1000
checkpoint-45000/unet/config.json
ADDED
@@ -0,0 +1,73 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.28.0.dev0",
+  "_name_or_path": "stabilityai/stable-diffusion-2-1",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": [
+    5,
+    10,
+    20,
+    20
+  ],
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 1024,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 8,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 96,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": true,
+  "use_linear_projection": true
+}
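Note: in_channels is 8 rather than the base stabilityai/stable-diffusion-2-1 UNet's 4 because InstructPix2Pix conditions on the input image: the VAE latents of the image being edited (4 channels) are concatenated with the noisy latents (4 channels) along the channel dimension, so conv_in is widened during fine-tuning (the extra input weights are typically zero-initialized by the training script). sample_size 96 corresponds to 768×768-pixel training with the VAE's 8× downsampling. A minimal sketch of loading this subfolder and checking the widened input layer, again with the hypothetical repo id:

```python
# Minimal sketch: load the checkpoint's UNet and inspect conv_in.
# "user/instruct-pix2pix-sd21" is a hypothetical repo id.
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "user/instruct-pix2pix-sd21", subfolder="checkpoint-45000/unet"
)
# 8 input channels: 4 noisy latents + 4 latents of the image being edited.
print(unet.conv_in.in_channels)  # -> 8
```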
checkpoint-45000/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e1efa4449e45a8f366dcb7317a68b120961c781b69d1d980f5a9936bed52444
+size 3463772592
checkpoint-45000/unet_ema/config.json
ADDED
@@ -0,0 +1,80 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.28.0.dev0",
+  "_name_or_path": "stabilityai/stable-diffusion-2-1",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": [
+    5,
+    10,
+    20,
+    20
+  ],
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 1024,
+  "cross_attention_norm": null,
+  "decay": 0.9999,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 8,
+  "inv_gamma": 1.0,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "min_decay": 0.0,
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "optimization_step": 45000,
+  "out_channels": 4,
+  "power": 0.6666666666666666,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 96,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": true,
+  "update_after_step": 0,
+  "use_ema_warmup": false,
+  "use_linear_projection": true
+}
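Note: unet_ema is an exponential-moving-average copy of the UNet, and its config stores the EMA hyperparameters (decay, min_decay, inv_gamma, power, use_ema_warmup, update_after_step) alongside the architecture, plus the optimization_step (45000) at which it was saved. With use_ema_warmup false, diffusers' EMAModel ramps the effective decay as (1 + step) / (10 + step), capped at decay; inv_gamma and power only affect the warmup schedule. A sketch of that ramp under these settings (the helper name is ours, not diffusers'):

```python
# Sketch of the EMA decay ramp when use_ema_warmup is false,
# mirroring the non-warmup branch of diffusers' EMAModel.get_decay.
def ema_decay(step: int, decay: float = 0.9999, min_decay: float = 0.0) -> float:
    if step <= 0:
        return 0.0
    cur = (1 + step) / (10 + step)          # ramps quickly toward 1.0
    return max(min_decay, min(cur, decay))  # clamped to [min_decay, decay]

print(ema_decay(10))     # 0.55 early in training
print(ema_decay(45000))  # ~0.99980, still just below the 0.9999 cap
```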
checkpoint-45000/unet_ema/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77a5a1bc50c0a37018db285c996acc430e9d1a8fce449fcadb437b814bcfd4eb
+size 3463772592
eval/edited_image_epoch_0_output_0.png … eval/edited_image_epoch_9_output_3.png
ADDED (40 binary PNGs: four edited-image outputs per epoch for epochs 0–9; image previews not reproducible here)
eval/original_image.png
ADDED (binary PNG: the original input image used for the evaluation edits)
feature_extractor/preprocessor_config.json
ADDED
@@ -0,0 +1,44 @@
+{
+  "_valid_processor_keys": [
+    "images",
+    "do_resize",
+    "size",
+    "resample",
+    "do_center_crop",
+    "crop_size",
+    "do_rescale",
+    "rescale_factor",
+    "do_normalize",
+    "image_mean",
+    "image_std",
+    "do_convert_rgb",
+    "return_tensors",
+    "data_format",
+    "input_data_format"
+  ],
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
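Note: this is the standard CLIP image preprocessing: resize the shortest edge to 224 with bicubic resampling (resample 3 in PIL's enum), center-crop to 224×224, rescale by 1/255 (0.00392…), then normalize with the CLIP mean/std. A minimal sketch of applying it, assuming the same hypothetical repo id:

```python
# Minimal sketch: run the exported feature extractor on an image.
# "user/instruct-pix2pix-sd21" is a hypothetical repo id.
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained(
    "user/instruct-pix2pix-sd21", subfolder="feature_extractor"
)
image = Image.open("eval/original_image.png").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])
```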
model_index.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "_class_name": "StableDiffusionInstructPix2PixPipeline",
+  "_diffusers_version": "0.28.0.dev0",
+  "_name_or_path": "stabilityai/stable-diffusion-2-1",
+  "feature_extractor": [
+    "transformers",
+    "CLIPImageProcessor"
+  ],
+  "image_encoder": [
+    null,
+    null
+  ],
+  "requires_safety_checker": false,
+  "safety_checker": [
+    null,
+    null
+  ],
+  "scheduler": [
+    "diffusers",
+    "DDIMScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
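Note: model_index.json identifies the export as a StableDiffusionInstructPix2PixPipeline fine-tuned from stabilityai/stable-diffusion-2-1, with the safety checker and image encoder left unset. A minimal usage sketch; the repo id and edit instruction are hypothetical placeholders:

```python
# Minimal sketch: run an instruction-based edit with the exported pipeline.
# "user/instruct-pix2pix-sd21" and the prompt are hypothetical.
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "user/instruct-pix2pix-sd21", torch_dtype=torch.float16
).to("cuda")

image = Image.open("eval/original_image.png").convert("RGB")
edited = pipe(
    "make it snowy",            # edit instruction
    image=image,                # image to edit
    num_inference_steps=20,
    image_guidance_scale=1.5,   # how closely to follow the input image
    guidance_scale=7.0,         # how closely to follow the instruction
).images[0]
edited.save("edited.png")
```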