Push model using huggingface_hub.
- config.json +1 -0
- pytorch_model.bin +3 -0
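The commit message above is the default one emitted by huggingface_hub when a model is pushed programmatically. As a minimal sketch of how an equivalent commit can be produced with `HfApi.upload_folder` (the repo id `user/model` and the local folder `./checkpoint` are placeholders, not the actual repository or path):

```python
# Hedged sketch: reproduce a "Push model using huggingface_hub." commit.
# "user/model" and "./checkpoint" are placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.create_repo(repo_id="user/model", exist_ok=True)
api.upload_folder(
    repo_id="user/model",
    folder_path="./checkpoint",  # contains config.json and pytorch_model.bin
    commit_message="Push model using huggingface_hub.",
)
```

Large binaries such as pytorch_model.bin are uploaded through Git LFS automatically; only JSON/text files end up as plain blobs in the diff below.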
config.json ADDED
@@ -0,0 +1 @@
+{"t5_config": {"vocab_size": 33280, "image_vocab_size": 16512, "image_patch_size": 16, "audio_vocab_size": 8320, "emb_dim": 3072, "num_heads": 24, "num_encoder_layers": 24, "num_decoder_layers": 24, "head_dim": 128, "mlp_dim": 8192, "mlp_activations": ["silu", "linear"], "dropout_rate": 0.0, "dropout_broadcast_dims": [-2], "logits_via_embedding": true, "float32_attention_logits": true, "decoder_xattention_internval": 1, "qk_norm": true, "dalle_attn_mask": true, "dynamic_unk_mask": true, "encoder_max_image_length": 576, "encoder_max_audio_length": 128, "encoder_max_text_length": 512, "decoder_max_image_length": 1024, "decoder_max_audio_length": 512, "decoder_max_text_length": 512, "text_pos_emb": "llama_rope", "image_pos_emb": "llama_rope", "audio_pos_emb": "llama_rope", "image_history_pos_emb": "llama_rope", "audio_history_pos_emb": "llama_rope", "image_tokenizer_type": "vqgan", "default_image_size": [256, 256], "default_image_vit_size": [384, 384], "default_image_history_vit_size": [256, 256], "default_audio_size": [256, 128], "default_audio_vit_size": [256, 128], "default_audio_history_vit_size": [256, 128], "image_vit_patch_size": 16, "audio_patch_size": 16, "audio_vit_patch_size": 16}, "image_history_cfg": {"resampler_type": "perceiver", "max_frames": 8, "latents_size": 32, "emb_dim": 1024, "num_heads": 16, "num_layers": 2, "xattention_index": [0, 1], "head_dim": 64, "mlp_dim": 4096, "mlp_activations": ["gelu"], "dropout_rate": 0.0, "dropout_broadcast_dims": [-2], "droppath_rate": 0.0, "layer_drop": 0.0, "xattn_qk_norm": false, "xattn_scaled_cosine": true, "attn_qk_norm": false, "attn_scaled_cosine": true, "float32_attention_logits": true, "clip_attn_logit": null}, "audio_history_cfg": {"resampler_type": "perceiver", "max_frames": 8, "latents_size": 16, "emb_dim": 1024, "num_heads": 16, "num_layers": 2, "xattention_index": [0, 1], "head_dim": 64, "mlp_dim": 4096, "mlp_activations": ["gelu"], "dropout_rate": 0.0, "dropout_broadcast_dims": [-2], "droppath_rate": 0.0, "layer_drop": 0.0, "xattn_qk_norm": false, "xattn_scaled_cosine": true, "attn_qk_norm": false, "attn_scaled_cosine": true, "float32_attention_logits": true, "clip_attn_logit": null}, "freeze_vit": true, "input_modalities": ["text", "image", "image_history", "audio", "audio_history"], "target_modalities": ["text", "image", "audio"], "sequence_length": {"is_training": true, "image_input_samples": 576, "image_history_input_samples": 256, "audio_input_samples": 128, "audio_history_input_samples": 128, "num_frames": 4}, "image_vqgan": {"embed_dim": 4, "n_embed": 16384, "double_z": false, "z_channels": 4, "resolution": 256, "in_channels": 3, "out_ch": 3, "ch": 128, "ch_mult": [1, 2, 2, 4], "num_res_blocks": 2, "attn_resolutions": [32], "dropout": 0, "default_input_size": [256, 256], "patch_size": [8, 8], "checkpoint_path": ""}, "audio_vqgan": {"vocab_size": 8192, "proj_dim": 32, "encoder_hidden_size": 512, "encoder_num_layers": 8, "encoder_mlp_dim": 2048, "encoder_num_heads": 8, "encoder_head_dim": 64, "decoder_hidden_size": 512, "decoder_num_layers": 8, "decoder_mlp_dim": 2048, "decoder_num_heads": 8, "decoder_head_dim": 64, "dropout_rate": 0.0, "droppath_rate": 0.0, "attention_dropout_rate": 0.0, "use_bias": false, "act_fn": "relu", "default_input_size": [128, 256], "patch_size": [8, 8], "output_channel": 1, "checkpoint_path": "", "use_decoder": true}, "image_vit_cfg": {"patch_size": 16, "pos_patch_size": 16, "emb_dim": 768, "num_heads": 12, "num_layers": 11, "head_dim": 64, "mlp_dim": 3072, "mlp_activations": ["gelu"], "dropout_rate": 0.0, "dropout_broadcast_dims": [], "float32_attention_logits": true, "default_input_size": [256, 256], "num_pos": 197}, "audio_vit_cfg": {"vit_embed": true, "patch_size": 16, "pos_patch_size": 16, "emb_dim": 768, "num_heads": 12, "num_layers": 11, "head_dim": 64, "mlp_dim": 3072, "mlp_activations": ["gelu"], "dropout_rate": 0.0, "dropout_broadcast_dims": [], "float32_attention_logits": true, "default_input_size": [256, 128], "transpose_input": true}, "use_image_vit": true, "use_audio_vit": true, "use_image_history_vit": true, "use_audio_history_vit": true}
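The single added line is the full model configuration: a T5-style multimodal encoder-decoder trunk (emb_dim 3072, 24 encoder and 24 decoder layers, RoPE position embeddings) with VQGAN tokenizers for image and audio targets, ViT encoders for image/audio inputs, and perceiver resamplers for the history modalities. A minimal sketch of fetching and inspecting the config, assuming a placeholder repo id:

```python
# Hedged sketch: download and inspect config.json. "user/model" is a
# placeholder repo id, not the actual repository.
import json

from huggingface_hub import hf_hub_download

config_path = hf_hub_download(repo_id="user/model", filename="config.json")
with open(config_path) as f:
    cfg = json.load(f)

t5 = cfg["t5_config"]
print(t5["emb_dim"], t5["num_encoder_layers"], t5["num_decoder_layers"])
# -> 3072 24 24
print(cfg["input_modalities"])   # text, image, image_history, audio, audio_history
print(cfg["target_modalities"])  # text, image, audio
```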
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e73c6c0a84b146b2d146f9c173e5013bcfdff5cd78eefa71097d5c7d2d364764
+size 14280578111
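These three lines are not the weights themselves but a Git LFS pointer: the Hub stores the 14,280,578,111-byte (~13.3 GiB) binary in LFS storage and resolves the pointer on download (hf_hub_download does this transparently). A minimal sketch of verifying a downloaded copy against the pointer's oid and size; the local path is a placeholder:

```python
# Hedged sketch: check a downloaded pytorch_model.bin against the LFS
# pointer above. The local path is a placeholder.
import hashlib
import os

EXPECTED_OID = "e73c6c0a84b146b2d146f9c173e5013bcfdff5cd78eefa71097d5c7d2d364764"
EXPECTED_SIZE = 14280578111  # bytes, ~13.3 GiB

path = "pytorch_model.bin"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("checkpoint matches the LFS pointer")
```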