kylielee505 committed 9e2acfe
Parent: d0fc979

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -25,7 +25,6 @@
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,59 @@
+---
+thumbnail: https://user-images.githubusercontent.com/54370274/243292723-fa703668-a931-41e1-8bcf-19c72203980b.png
+tags:
+- TextTovideo
+- Text2Video
+- text-to-video
+---
+
+🐣 Please follow me for new updates https://twitter.com/camenduru <br />
+🔥 Please join our Discord server https://discord.gg/k5BwmmvJJU
+
+![00041-3056174990](https://github.com/camenduru/Text-To-Video-Finetuning-colab/assets/54370274/fa703668-a931-41e1-8bcf-19c72203980b)
+
+# Potat 1️⃣
+First Open-Source 1024x576 Text-To-Video Model 🥳
+
+https://huggingface.co/vdo/potat1-5000/tree/main <br />
+https://huggingface.co/vdo/potat1-10000/tree/main <br />
+https://huggingface.co/vdo/potat1-10000-base-text-encoder/tree/main <br />
+https://huggingface.co/vdo/potat1-15000/tree/main <br />
+https://huggingface.co/vdo/potat1-20000/tree/main <br />
+https://huggingface.co/vdo/potat1-25000/tree/main <br />
+https://huggingface.co/vdo/potat1-30000/tree/main <br />
+https://huggingface.co/vdo/potat1-35000/tree/main <br />
+https://huggingface.co/vdo/potat1-40000/tree/main <br />
+https://huggingface.co/vdo/potat1-45000/tree/main <br />
+https://huggingface.co/vdo/potat1-50000/tree/main <br />
+https://huggingface.co/vdo/potat1-50000-base-text-encoder/tree/main = https://huggingface.co/camenduru/potat1 (you are here) <br />
+
+### Info
+Prototype model <br />
+Trained on https://lambdalabs.com ❤ 1xA100 (40GB) <br />
+2197 clips, 68388 frames tagged with [salesforce/blip2-opt-6.7b-coco](https://huggingface.co/Salesforce/blip2-opt-6.7b-coco) <br />
+train_steps: 10000 <br />
+
+### Dataset & Config
+https://huggingface.co/camenduru/potat1_dataset/tree/main
+
+### Finetuning
+https://github.com/Breakthrough/PySceneDetect <br />
+https://github.com/ExponentialML/Video-BLIP2-Preprocessor <br />
+https://github.com/ExponentialML/Text-To-Video-Finetuning <br />
+https://github.com/camenduru/Text-To-Video-Finetuning-colab <br />
+
+### Base Model
+https://huggingface.co/damo-vilab/modelscope-damo-text-to-video-synthesis <br />
+https://www.modelscope.cn/models/damo/text-to-video-synthesis <br />
+
+Thanks to [damo-vilab](https://damo.alibaba.com/) ❤ [ExponentialML](https://github.com/ExponentialML) ❤ [kabachuha](https://github.com/kabachuha) ❤ [@DiffusersLib](https://twitter.com/DiffusersLib) ❤ [@LambdaAPI](https://twitter.com/LambdaAPI) ❤ [@cerspense](https://twitter.com/cerspense) ❤ [@CiaraRowles1](https://twitter.com/CiaraRowles1) ❤ [@p1atdev_art](https://twitter.com/p1atdev_art) ❤ <br />
+
+Thanks to Orellius ❤ (important bug report) <br />
+
+Please try it 🐣 <br />
+https://github.com/camenduru/text-to-video-synthesis-colab <br />
+
+<video src="https://github-production-user-asset-6210df.s3.amazonaws.com/54370274/244223223-c5201c8a-2815-4533-9474-1e312c564f4e.mp4" controls="controls" muted="muted" style="max-height:640px; min-height: 200px"></video>
+
+Potat 2️⃣ is in the oven ♨ <br />
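The README points at the Colab but includes no local usage, so here is a minimal inference sketch. It assumes diffusers >= 0.17 (the version recorded in model_index.json below), a CUDA GPU, and a made-up example prompt; on newer diffusers versions the output may be under `.frames[0]` rather than `.frames`.

```python
import torch
from diffusers import TextToVideoSDPipeline
from diffusers.utils import export_to_video

# Load every component listed in model_index.json from this repo.
pipe = TextToVideoSDPipeline.from_pretrained(
    "camenduru/potat1", torch_dtype=torch.float16
).to("cuda")
pipe.enable_vae_slicing()  # decode frames in slices to reduce VRAM spikes

# The model was finetuned at 1024x576, so sample at that resolution.
video_frames = pipe(
    "an astronaut riding a horse on mars, cinematic, high detail",  # example prompt
    width=1024,
    height=576,
    num_frames=24,
    num_inference_steps=25,
).frames

export_to_video(video_frames, "potat1_sample.mp4")
```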
model_index.json ADDED
@@ -0,0 +1,24 @@
+{
+  "_class_name": "TextToVideoSDPipeline",
+  "_diffusers_version": "0.17.0.dev0",
+  "scheduler": [
+    "diffusers",
+    "DDIMScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet3DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
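model_index.json is the component registry that `DiffusionPipeline.from_pretrained` reads: each key names a subfolder of the repo plus the (library, class) pair to instantiate from it. As a sketch, this is the manual equivalent of what the loader does under the hood:

```python
import torch
from diffusers import (AutoencoderKL, DDIMScheduler,
                       TextToVideoSDPipeline, UNet3DConditionModel)
from transformers import CLIPTextModel, CLIPTokenizer

repo = "camenduru/potat1"

# One from_pretrained call per entry in model_index.json, each pointed at
# its subfolder; the pipeline is then assembled from the loaded parts.
pipe = TextToVideoSDPipeline(
    vae=AutoencoderKL.from_pretrained(repo, subfolder="vae", torch_dtype=torch.float16),
    text_encoder=CLIPTextModel.from_pretrained(repo, subfolder="text_encoder", torch_dtype=torch.float16),
    tokenizer=CLIPTokenizer.from_pretrained(repo, subfolder="tokenizer"),
    unet=UNet3DConditionModel.from_pretrained(repo, subfolder="unet", torch_dtype=torch.float16),
    scheduler=DDIMScheduler.from_pretrained(repo, subfolder="scheduler"),
)
```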
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
+{
+  "_class_name": "DDIMScheduler",
+  "_diffusers_version": "0.17.0.dev0",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "clip_sample": false,
+  "clip_sample_range": 1.0,
+  "dynamic_thresholding_ratio": 0.995,
+  "num_train_timesteps": 1000,
+  "prediction_type": "epsilon",
+  "sample_max_value": 1.0,
+  "set_alpha_to_one": false,
+  "skip_prk_steps": true,
+  "steps_offset": 1,
+  "thresholding": false,
+  "trained_betas": null
+}
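Because diffusers schedulers are interchangeable when their configs are compatible, this DDIM configuration does not lock inference to DDIM. A common sketch, reusing `pipe` from the examples above with another made-up prompt, is swapping in DPM-Solver++ so fewer steps suffice:

```python
from diffusers import DPMSolverMultistepScheduler

# Rebuild a faster scheduler from the DDIM config shipped with the repo.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
video_frames = pipe("a timelapse of clouds over mountains",  # example prompt
                    num_inference_steps=20).frames
```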
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "./models/model_scope_diffusers/",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_size": 1024,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 23,
+  "pad_token_id": 1,
+  "projection_dim": 512,
+  "torch_dtype": "float16",
+  "transformers_version": "4.29.2",
+  "vocab_size": 49408
+}
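Note that `hidden_size` (1024) matches the UNet's `cross_attention_dim` below, so the text encoder's hidden states feed cross-attention directly. A quick sanity-check sketch (the checkpoint is stored in float16 per `torch_dtype`, but transformers loads it as float32 unless told otherwise):

```python
from transformers import CLIPTextModel

# Load only the text encoder from its subfolder and confirm its shape.
text_encoder = CLIPTextModel.from_pretrained("camenduru/potat1", subfolder="text_encoder")
print(text_encoder.config.num_hidden_layers, text_encoder.config.hidden_size)  # 23 1024
```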
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2188379b05015f531d61503e714234d00a64939792f3098b324e516547f0194f
+size 1361674657
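The three lines above are not the weights themselves but a git-lfs pointer file: a spec version line, the sha256 `oid` of the real blob, and its byte size. A sketch for verifying a downloaded blob against its pointer, using nothing beyond the pointer spec and the standard library:

```python
import hashlib
import os

def verify_lfs_blob(path: str, oid: str, size: int) -> bool:
    """Check a file against the oid/size recorded in a git-lfs pointer."""
    if os.path.getsize(path) != size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == oid

print(verify_lfs_blob(
    "text_encoder/pytorch_model.bin",
    "2188379b05015f531d61503e714234d00a64939792f3098b324e516547f0194f",
    1361674657,
))
```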
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "!",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+{
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "model_max_length": 77,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
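Taken together, the tokenizer files say: lowercase the prompt, pad or truncate to `model_max_length` (77), and pad with `<|endoftext|>`. A sketch of encoding a prompt the way the pipeline does, reusing `text_encoder` from the sanity check above and another example prompt:

```python
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("camenduru/potat1", subfolder="tokenizer")
inputs = tokenizer(
    "a potato sprouting in a garden",       # example prompt
    padding="max_length",
    max_length=tokenizer.model_max_length,  # 77
    truncation=True,
    return_tensors="pt",
)
embeddings = text_encoder(inputs.input_ids).last_hidden_state
print(embeddings.shape)  # torch.Size([1, 77, 1024])
```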
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_class_name": "UNet3DConditionModel",
+  "_diffusers_version": "0.17.0.dev0",
+  "_name_or_path": "./models/model_scope_diffusers/",
+  "act_fn": "silu",
+  "attention_head_dim": 64,
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "cross_attention_dim": 1024,
+  "down_block_types": [
+    "CrossAttnDownBlock3D",
+    "CrossAttnDownBlock3D",
+    "CrossAttnDownBlock3D",
+    "DownBlock3D"
+  ],
+  "downsample_padding": 1,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "out_channels": 4,
+  "sample_size": 32,
+  "up_block_types": [
+    "UpBlock3D",
+    "CrossAttnUpBlock3D",
+    "CrossAttnUpBlock3D",
+    "CrossAttnUpBlock3D"
+  ]
+}
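The UNet operates on 4-channel latents at 1/8 of the pixel resolution, with an added frames axis: at the 1024x576 training size a latent batch is `(batch, 4, num_frames, 72, 128)`. A shape-check sketch with random tensors (several GB of weights are loaded; the output is noise, not video):

```python
import torch
from diffusers import UNet3DConditionModel

unet = UNet3DConditionModel.from_pretrained("camenduru/potat1", subfolder="unet")

latents = torch.randn(1, 4, 8, 72, 128)  # batch, channels, frames, 576/8, 1024/8
text_emb = torch.randn(1, 77, 1024)      # matches cross_attention_dim
with torch.no_grad():
    noise_pred = unet(latents, timestep=981, encoder_hidden_states=text_emb).sample
print(noise_pred.shape)  # torch.Size([1, 4, 8, 72, 128])
```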
unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e38f4a953b170000d44140ddf807198414d4cd95a7058224efde14448b8ea185
+size 2823110385
vae/config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.17.0.dev0",
+  "_name_or_path": "./models/model_scope_diffusers/",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "in_channels": 3,
+  "latent_channels": 4,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 512,
+  "scaling_factor": 0.18215,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ]
+}
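The VAE is purely 2D (an `AutoencoderKL` built from 2D blocks), so video frames are decoded independently, stacked along the batch axis, after dividing out `scaling_factor` (0.18215). A decoding sketch with random latents standing in for real ones:

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("camenduru/potat1", subfolder="vae")

latents = torch.randn(8, 4, 72, 128)  # 8 frames of (latent_channels, 576/8, 1024/8)
with torch.no_grad():
    frames = vae.decode(latents / vae.config.scaling_factor).sample
print(frames.shape)  # torch.Size([8, 3, 576, 1024])
```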
vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b0d11ff25d00ceaa02f602831d9cfe650509fdc850c0a1bcb2acdfa03bd5d56
+size 167407857