cerspense committed
Commit: b570381
1 Parent(s): 0c52f59

Upload 7 files

VQGAN_autoencoder.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:930e9865584beae2405d29bc06a05db3bb6a5b34eedd40a7db29b9156ed7d098
+ size 2607657443
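
Note: VQGAN_autoencoder.pth and the .bin/.pth entries below are Git LFS pointer files, not the weights themselves; each pointer records only the real object's SHA-256 digest (oid) and byte size. A downloaded payload can be checked against its pointer. A minimal sketch in Python (the local file path is an assumption):

import hashlib

# Expected values copied from the LFS pointer above.
EXPECTED_OID = "930e9865584beae2405d29bc06a05db3bb6a5b34eedd40a7db29b9156ed7d098"
EXPECTED_SIZE = 2607657443

def verify_lfs_object(path: str, oid: str, size: int) -> bool:
    """Hash the file in 1 MiB chunks and compare against the pointer's oid/size."""
    digest = hashlib.sha256()
    total = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return total == size and digest.hexdigest() == oid

# Path is an assumption; point it at the downloaded checkpoint.
print(verify_lfs_object("VQGAN_autoencoder.pth", EXPECTED_OID, EXPECTED_SIZE))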
zs2_576w/configuration.json ADDED
@@ -0,0 +1,34 @@
+ { "framework": "pytorch",
+   "task": "text-to-video-synthesis",
+   "model": {
+     "type": "latent-text-to-video-synthesis",
+     "model_args": {
+       "ckpt_clip": "open_clip_pytorch_model.bin",
+       "ckpt_unet": "text2video_pytorch_model.pth",
+       "ckpt_autoencoder": "../VQGAN_autoencoder.pth",
+       "max_frames": 16,
+       "tiny_gpu": 1
+     },
+     "model_cfg": {
+       "unet_in_dim": 4,
+       "unet_dim": 320,
+       "unet_y_dim": 768,
+       "unet_context_dim": 1024,
+       "unet_out_dim": 4,
+       "unet_dim_mult": [1, 2, 4, 4],
+       "unet_num_heads": 8,
+       "unet_head_dim": 64,
+       "unet_res_blocks": 2,
+       "unet_attn_scales": [1, 0.5, 0.25],
+       "unet_dropout": 0.1,
+       "temporal_attention": "True",
+       "num_timesteps": 1000,
+       "mean_type": "eps",
+       "var_type": "fixed_small",
+       "loss_type": "mse"
+     }
+   },
+   "pipeline": {
+     "type": "latent-text-to-video-synthesis"
+   }
+ }
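
The framework/task/pipeline layout above is ModelScope's configuration format, so the zs2_576w directory can plausibly be loaded through ModelScope's pipeline factory. A minimal sketch, assuming the files were downloaded into a local zs2_576w/ directory sitting next to VQGAN_autoencoder.pth (the relative "../VQGAN_autoencoder.pth" in ckpt_autoencoder expects exactly that layout):

from modelscope.pipelines import pipeline
from modelscope.outputs import OutputKeys

# Local directory holding configuration.json and the two checkpoints;
# the path itself is an assumption about where the files landed.
pipe = pipeline("text-to-video-synthesis", model="./zs2_576w")

result = pipe({"text": "A panda eating bamboo on a rock."})
print(result[OutputKeys.OUTPUT_VIDEO])  # path to the generated video file

Of the model_args, max_frames: 16 caps each generated clip at 16 frames, and tiny_gpu: 1 appears to toggle a low-VRAM offloading path in the reference implementation.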
zs2_576w/open_clip_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7737c257bec4a587785ae6b9bf52cc0c16f041ef776df6bb60928615059a2878
+ size 1972448549
zs2_576w/text2video_pytorch_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6040b6383685912fa5d3aeb1e84d6efe1d11f4de773c23f3f2a5e97c12ab6b7
+ size 2822972283
zs2_XL/configuration.json ADDED
@@ -0,0 +1,34 @@
+ { "framework": "pytorch",
+   "task": "text-to-video-synthesis",
+   "model": {
+     "type": "latent-text-to-video-synthesis",
+     "model_args": {
+       "ckpt_clip": "open_clip_pytorch_model.bin",
+       "ckpt_unet": "text2video_pytorch_model.pth",
+       "ckpt_autoencoder": "../VQGAN_autoencoder.pth",
+       "max_frames": 16,
+       "tiny_gpu": 1
+     },
+     "model_cfg": {
+       "unet_in_dim": 4,
+       "unet_dim": 320,
+       "unet_y_dim": 768,
+       "unet_context_dim": 1024,
+       "unet_out_dim": 4,
+       "unet_dim_mult": [1, 2, 4, 4],
+       "unet_num_heads": 8,
+       "unet_head_dim": 64,
+       "unet_res_blocks": 2,
+       "unet_attn_scales": [1, 0.5, 0.25],
+       "unet_dropout": 0.1,
+       "temporal_attention": "True",
+       "num_timesteps": 1000,
+       "mean_type": "eps",
+       "var_type": "fixed_small",
+       "loss_type": "mse"
+     }
+   },
+   "pipeline": {
+     "type": "latent-text-to-video-synthesis"
+   }
+ }
zs2_XL/open_clip_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b25d2b2605ea43e0447eb84b8b08ba027855569f74391ecc9a3abf283f045441
+ size 1972448549
zs2_XL/text2video_pytorch_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18dd886130ca1d7228900ac703e88f96f358f040cd56f5392f1d8d7b174ec750
+ size 2822972283
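
Because a plain git clone without LFS fetches only the pointer files above, the actual weights have to come through Git LFS or the Hub API. A sketch using huggingface_hub (the repo id is an assumption inferred from the commit author and directory names, not confirmed by this commit page; adjust to the real repository):

from huggingface_hub import snapshot_download

# repo_id is an assumption; replace with the actual repository id.
local_dir = snapshot_download(
    repo_id="cerspense/zeroscope_v2",
    allow_patterns=["zs2_576w/*", "VQGAN_autoencoder.pth"],  # one variant plus the shared autoencoder
)
print(local_dir)  # local snapshot containing the real weight files, not pointers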