kabachuha committed
Commit d12c105
1 Parent(s): 3c8e178

add the pruned weights

VQGAN_autoencoder.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:930e9865584beae2405d29bc06a05db3bb6a5b34eedd40a7db29b9156ed7d098
+ size 2607657443
configuration.json ADDED
@@ -0,0 +1,35 @@
+ { "framework": "pytorch",
+ "task": "text-to-video-synthesis",
+ "model": {
+ "type": "latent-text-to-video-synthesis",
+ "model_args": {
+ "ckpt_clip": "open_clip_pytorch_model.bin",
+ "ckpt_unet": "text2video_pytorch_model.pth",
+ "ckpt_autoencoder": "VQGAN_autoencoder.pth",
+ "max_frames": 16,
+ "tiny_gpu": 1
+ },
+ "model_cfg": {
+ "unet_in_dim": 4,
+ "unet_dim": 320,
+ "unet_y_dim": 768,
+ "unet_context_dim": 1024,
+ "unet_out_dim": 4,
+ "unet_dim_mult": [1, 2, 4, 4],
+ "unet_num_heads": 8,
+ "unet_head_dim": 64,
+ "unet_res_blocks": 2,
+ "unet_attn_scales": [1, 0.5, 0.25],
+ "unet_dropout": 0.1,
+ "temporal_attention": "True",
+ "num_timesteps": 1000,
+ "mean_type": "eps",
+ "var_type": "fixed_small",
+ "loss_type": "mse"
+ }
+ },
+ "pipeline": {
+ "type": "latent-text-to-video-synthesis"
+ }
+ }
+
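The configuration above ties the three weight files to the latent text-to-video pipeline: model_args names the checkpoints, and model_cfg carries the UNet and diffusion settings. As a rough illustration only (the local directory name below is an assumption, not part of this commit), the file can be read like any other JSON to confirm the referenced checkpoints are in place:

# Minimal sketch: read configuration.json from a local copy of this repo and
# check that the checkpoint files it references are actually present.
# repo_dir is a hypothetical local clone location, not part of the repository.
import json
import os

repo_dir = "./text-to-video-repo"

with open(os.path.join(repo_dir, "configuration.json"), "r", encoding="utf-8") as f:
    cfg = json.load(f)

print("task:", cfg["task"])
print("max_frames:", cfg["model"]["model_args"]["max_frames"])

# The three checkpoint filenames come straight from model_args.
for key in ("ckpt_clip", "ckpt_unet", "ckpt_autoencoder"):
    name = cfg["model"]["model_args"][key]
    path = os.path.join(repo_dir, name)
    status = f"{os.path.getsize(path)} bytes" if os.path.exists(path) else "missing"
    print(f"{key}: {name} -> {status}")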
open_clip_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73c32c62eebf1112b0693ff9e3ecfa0573ba02cd279420ea4da4af1cbfb39e3b
+ size 1972451989
text2video_pytorch_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbba5db98d5432378f9ccdb6bd572768c7ff190dd83b9b76c3218594c793fedd
+ size 2823000429
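All three weight files are stored through Git LFS, so the diff records only a pointer (spec version, sha256 oid, and byte size) rather than the binary itself. As a small sketch, assuming the actual binaries have been pulled into a local clone, the recorded oids and sizes can be rechecked with nothing more than the standard library:

# Minimal sketch: verify locally downloaded weight files against the
# sha256 digests and byte sizes recorded in the Git LFS pointers above.
# Paths are assumed to be relative to a local clone of the repository.
import hashlib
import os

expected = {
    "VQGAN_autoencoder.pth": ("930e9865584beae2405d29bc06a05db3bb6a5b34eedd40a7db29b9156ed7d098", 2607657443),
    "open_clip_pytorch_model.bin": ("73c32c62eebf1112b0693ff9e3ecfa0573ba02cd279420ea4da4af1cbfb39e3b", 1972451989),
    "text2video_pytorch_model.pth": ("cbba5db98d5432378f9ccdb6bd572768c7ff190dd83b9b76c3218594c793fedd", 2823000429),
}

for name, (oid, size) in expected.items():
    if not os.path.exists(name):
        print(f"{name}: not downloaded (still an LFS pointer?)")
        continue
    h = hashlib.sha256()
    with open(name, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    ok = h.hexdigest() == oid and os.path.getsize(name) == size
    print(f"{name}: {'OK' if ok else 'MISMATCH'}")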