AltLuv committed on
Commit fda6f38
Parent: 45b6f4b

End of training

lr_scheduler/lr_scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcf05313b487c70c3cdbba01e48e12ed30fa9fd5bfdcd675858db93b3726e073
+ size 1076
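The .pt files in this commit are stored as Git LFS pointers: the three added lines record only the pointer spec version, the SHA-256 of the real payload, and its size in bytes. A minimal sketch (assuming the actual payload has already been fetched, e.g. with git lfs pull) for checking a local file against the values in the pointer above:

import hashlib
import os

def verify_lfs_payload(path, expected_sha256, expected_size):
    """Compare a pulled LFS payload against the oid/size from its pointer file."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

# Values copied from the lr_scheduler pointer committed above.
print(verify_lfs_payload(
    "lr_scheduler/lr_scheduler.pt",
    "bcf05313b487c70c3cdbba01e48e12ed30fa9fd5bfdcd675858db93b3726e073",
    1076,
))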
model_index.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "_class_name": "UTTIPipeline",
+   "_diffusers_version": "0.25.0.dev0",
+   "scheduler": [
+     "sde_torch_param",
+     "TorchSDE_PARAM"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ]
+ }
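model_index.json records the pipeline layout: a custom UTTIPipeline built from a custom TorchSDE_PARAM scheduler (from a module named sde_torch_param) plus standard CLIPTextModel, CLIPTokenizer, and UNet2DConditionModel components. The custom classes ship with the training code rather than with diffusers, so the full pipeline cannot be loaded without them; the standard subfolders load on their own. A minimal sketch, assuming the repository has been cloned to ./pokemon-test (the local path is hypothetical):

from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import UNet2DConditionModel

repo = "./pokemon-test"  # hypothetical local clone of this repository

# The standard components named in model_index.json load with the usual APIs.
tokenizer = CLIPTokenizer.from_pretrained(repo, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(repo, subfolder="text_encoder")
unet = UNet2DConditionModel.from_pretrained(repo, subfolder="unet")

# UTTIPipeline and TorchSDE_PARAM are project-specific classes; they would have
# to be imported from the accompanying training code before the whole pipeline
# can be reassembled, e.g. UTTIPipeline(unet=unet, text_encoder=text_encoder, ...).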
optimizer/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:992a85f1fb12dfe5dccc3d9d630a684a995d261d722a40526268c5bb7f94d976
+ size 2664
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "_class_name": "TorchSDE_PARAM",
+   "_diffusers_version": "0.25.0.dev0",
+   "data_dimension": 3072
+ }
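The scheduler config records only the class name, the diffusers version it was saved with, and data_dimension = 3072. That value equals 3 × 32 × 32, i.e. a flattened UNet sample (see unet/config.json below); reading it as the flattened pixel dimension is an inference from the configs, not something stated here. A trivial sketch of reading it back:

import json

with open("scheduler/scheduler_config.json") as f:
    cfg = json.load(f)

# 3 * 32 * 32 == 3072, matching the UNet's in_channels and sample_size
# (an inferred correspondence, not recorded in the config itself).
print(cfg["_class_name"], cfg["data_dimension"])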
scheduler/scheduler_config.py ADDED
@@ -0,0 +1,41 @@
+ import jax.numpy as jnp
+ import jax
+ import torch
+ from dataclasses import dataclass
+ import sympy
+ import sympy as sp
+ from sympy import Matrix, Symbol
+ import math
+ from sde_redefined_param import SDEDimension
+ @dataclass
+ class SDEConfig:
+     name = "Custom"
+     variable = Symbol('t', nonnegative=True, real=True)
+
+     drift_dimension = SDEDimension.SCALAR
+     diffusion_dimension = SDEDimension.SCALAR
+     diffusion_matrix_dimension = SDEDimension.SCALAR
+
+     # TODO (KLAUS): HANDLE THE PARAMETERS BEING Ø
+     drift_parameters = Matrix([sympy.symbols("f1")])
+     diffusion_parameters = Matrix([sympy.symbols("l1")])
+
+     drift = -variable**2 * drift_parameters[0]**2
+     k = 1  # * diffusion_parameters[0]**2
+     diffusion = sympy.Piecewise((k * sympy.sin(variable/2 * sympy.pi), variable < 1), (k*1, variable >= 1))
+     # TODO (KLAUS): in the SDE SAMPLING CHANGING Q impacts how we sample z ~ N(0, Q*(delta t))
+     diffusion_matrix = 1
+
+     initial_variable_value = 0
+     max_variable_value = 1  # math.inf
+     min_sample_value = 1e-6
+
+     module = 'jax'
+
+     drift_integral_form = True
+     diffusion_integral_form = True
+     diffusion_integral_decomposition = 'cholesky'  # ldl
+
+
+
+     target = "epsilon"  # x0
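scheduler_config.py specifies the SDE symbolically: a drift of -t²·f1² in a single drift parameter f1, and a diffusion that follows sin(πt/2) up to t = 1 and stays at 1 afterwards, with t ranging over [0, 1]. The sde_redefined_param module that consumes this config is not part of the commit, so as a standalone illustration, here is a minimal sketch of turning the same expressions into numeric callables with sympy:

import sympy
from sympy import Symbol

t = Symbol("t", nonnegative=True, real=True)
f1 = sympy.symbols("f1")  # drift parameter, as in SDEConfig.drift_parameters

drift = -t**2 * f1**2
diffusion = sympy.Piecewise((sympy.sin(t / 2 * sympy.pi), t < 1), (1, t >= 1))

# Lambdify into plain numeric functions for quick inspection.
drift_fn = sympy.lambdify((t, f1), drift)
diffusion_fn = sympy.lambdify(t, diffusion)

print(drift_fn(0.5, 2.0))   # -(0.5**2) * 2.0**2 = -1.0
print(diffusion_fn(0.5))    # sin(pi/4) ≈ 0.7071
print(diffusion_fn(1.5))    # 1.0 once t >= 1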
scheduler/sdeparameters.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d263ef537c82ddadac98389f9001afbb35890e2909cf951d39af5ab87d93ad9
+ size 136
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "AltLuv/pokemon-test",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "projection_dim": 512,
+   "torch_dtype": "float16",
+   "transformers_version": "4.35.2",
+   "vocab_size": 49408
+ }
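text_encoder/config.json is a standard CLIP text-model configuration: 12 layers, 12 heads, hidden size 768, a 77-token context, and weights saved in float16. A minimal sketch, again assuming a local clone at ./pokemon-test, of encoding a prompt into the 77×768 hidden states the UNet cross-attends to (the prompt itself is just an example):

import torch
from transformers import CLIPTextModel, CLIPTokenizer

repo = "./pokemon-test"  # hypothetical local clone
tokenizer = CLIPTokenizer.from_pretrained(repo, subfolder="tokenizer")
# Without an explicit torch_dtype, the fp16 checkpoint is upcast to float32 on load.
text_encoder = CLIPTextModel.from_pretrained(repo, subfolder="text_encoder")

inputs = tokenizer(
    "a drawing of a green pokemon",
    padding="max_length",
    max_length=tokenizer.model_max_length,  # 77, per tokenizer_config.json
    truncation=True,
    return_tensors="pt",
)
with torch.no_grad():
    hidden = text_encoder(inputs.input_ids).last_hidden_state

print(hidden.shape)  # torch.Size([1, 77, 768]); matches the UNet's cross_attention_dim=768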
text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:660c6f5b1abae9dc498ac2d21e1347d2abdb0cf6c0c0c8576cd796491d9a6cdd
+ size 246144152
tokenizer/merges.txt ADDED
The diff for this file is too large to render.
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
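tokenizer_config.json and special_tokens_map.json pin down the CLIP BPE tokenizer: a 77-token maximum length, <|startoftext|> (id 49406) as BOS, and <|endoftext|> (id 49407) serving as EOS, PAD, and UNK at once. A quick sketch, assuming the same hypothetical local clone, showing those special tokens in a tokenized prompt:

from transformers import CLIPTokenizer

# Hypothetical local clone of this repository.
tokenizer = CLIPTokenizer.from_pretrained("./pokemon-test", subfolder="tokenizer")

ids = tokenizer("a yellow pokemon").input_ids
print(ids[0], ids[-1])                                   # 49406 49407: BOS first, EOS last
print(tokenizer.pad_token_id == tokenizer.eos_token_id)  # True: pad is <|endoftext|>
print(tokenizer.model_max_length)                        # 77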
tokenizer/vocab.json ADDED
The diff for this file is too large to render.
unet/config.json ADDED
@@ -0,0 +1,73 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.25.0.dev0",
+   "act_fn": "silu",
+   "addition_embed_type": null,
+   "addition_embed_type_num_heads": 64,
+   "addition_time_embed_dim": null,
+   "attention_head_dim": 8,
+   "attention_type": "default",
+   "block_out_channels": [
+     128,
+     128,
+     256,
+     256,
+     512,
+     512
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 768,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "DownBlock2D",
+     "DownBlock2D",
+     "DownBlock2D",
+     "DownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dropout": 0.0,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 3,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 3,
+   "projection_class_embeddings_input_dim": null,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "reverse_transformer_layers_per_block": null,
+   "sample_size": 32,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "transformer_layers_per_block": 1,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "UpBlock2D",
+     "UpBlock2D",
+     "UpBlock2D",
+     "UpBlock2D"
+   ],
+   "upcast_attention": false,
+   "use_linear_projection": false
+ }
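unet/config.json describes a UNet2DConditionModel that takes 3-channel 32×32 samples and returns 3-channel outputs, with cross-attention over 768-dimensional text embeddings in one down block, the mid block, and one up block. Together with the absence of a vae entry in model_index.json, this suggests the pipeline operates directly in pixel space, though that is an inference from the configs rather than something stated in them. A minimal sketch of a forward pass with dummy tensors shaped to this config:

import torch
from diffusers import UNet2DConditionModel

# Hypothetical local clone of this repository.
unet = UNet2DConditionModel.from_pretrained("./pokemon-test", subfolder="unet")

sample = torch.randn(1, 3, 32, 32)               # in_channels=3, sample_size=32
timestep = torch.tensor([0.5])                   # the SDE config keeps t in [0, 1]
encoder_hidden_states = torch.randn(1, 77, 768)  # cross_attention_dim=768

with torch.no_grad():
    out = unet(sample, timestep, encoder_hidden_states=encoder_hidden_states).sample

print(out.shape)  # torch.Size([1, 3, 32, 32]); out_channels=3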
unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:235728a3ee5fa837a7419ef04d3c73d23108f93c9804a1bd0953c8fc23063c17
+ size 574476604