JsST committed on
Commit ba0a936 · verified · 1 Parent(s): c7a25c3

Upload 11 files
.gitattributes CHANGED
@@ -1,3 +1,7 @@
  version https://git-lfs.github.com/spec/v1
  oid sha256:8f1f58fd32af1d9afe3c8d06ebe19deef3656ab96408bbdfb46cf45e8eeb0bc3
  size 38
+ Stage1/llavaphi-2.7b-pretrain/mm_projector.bin filter=lfs diff=lfs merge=lfs -text
+ Stage1/llavaphi-2.7b-pretrain/runs/Mar23_12-41-12_r750xa/events.out.tfevents.1711197698.r750xa filter=lfs diff=lfs merge=lfs -text
+ Stage1/llavastablelm-1.6b-pretrain/mm_projector.bin filter=lfs diff=lfs merge=lfs -text
+ Stage1/llavastablelm-1.6b-pretrain/runs/Apr05_14-33-25_r750xa/events.out.tfevents.1712327628.r750xa filter=lfs diff=lfs merge=lfs -text
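These new rules route the projector weights and TensorBoard event files through Git LFS, so the repository itself stores only small pointer stubs of the form shown in the diffs below (version / oid / size). As a sanity check, a pointer can be parsed and compared against a fetched blob; this is a minimal sketch, and the helper names here are made up for illustration:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer stub: one 'key value' pair per line."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer: dict, blob_path: str) -> bool:
    """Check a fetched blob against the sha256 and size recorded in the pointer."""
    data = Path(blob_path).read_bytes()
    oid = pointer["oid"].removeprefix("sha256:")
    return len(data) == int(pointer["size"]) and hashlib.sha256(data).hexdigest() == oid

# Before `git lfs pull`, the checked-out file *is* the pointer stub:
# ptr = parse_lfs_pointer("Stage1/llavaphi-2.7b-pretrain/mm_projector.bin")
# print(verify_blob(ptr, "mm_projector.bin"))  # True once the real blob is fetched
```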
Stage1/llavaphi-2.7b-pretrain/config.json ADDED
@@ -0,0 +1,53 @@
+ {
+ "_name_or_path": "/home/songtaojiang/TinyLLaVABench/phi-2",
+ "architectures": [
+ "PhiForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "auto_map": {
+ "AutoConfig": "configuration_phi.PhiConfig",
+ "AutoModelForCausalLM": "modeling_phi.PhiForCausalLM"
+ },
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.0,
+ "eos_token_id": 50256,
+ "freeze_mm_mlp_adapter": false,
+ "hidden_act": "gelu_new",
+ "hidden_size": 2560,
+ "image_aspect_ratio": "square",
+ "image_projector_type": "mlp2x_gelu",
+ "initializer_range": 0.02,
+ "intermediate_size": 10240,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 2048,
+ "mm_hidden_size": 1024,
+ "mm_image_tower": "/home/songtaojiang/MobileVLM/clip-vit-large-patch14-336",
+ "mm_projector_lr": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_video_tower": null,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "model_type": "llava_phi",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 32,
+ "pad_token_id": 50295,
+ "partial_rotary_factor": 0.4,
+ "qk_layernorm": false,
+ "resid_pdrop": 0.1,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "float16",
+ "transformers_version": "4.37.0",
+ "tune_mm_mlp_adapter": true,
+ "use_cache": true,
+ "use_mm_proj": true,
+ "video_global_proj": false,
+ "video_projector_type": "linear",
+ "video_spatial_proj": false,
+ "video_temproal_proj": false,
+ "vocab_size": 51200
+ }
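The multimodal geometry follows directly from this config: mm_hidden_size (1024, the CLIP ViT-L/14-336 feature width) is projected into hidden_size (2560) by the mlp2x_gelu adapter, each of the 32 attention heads gets 2560 / 32 = 80 dimensions, and partial_rotary_factor 0.4 applies rotary embeddings to the first 32 of those 80. A small sketch that reads the file and prints the derived values (path assumed relative to the repo root):

```python
import json

with open("Stage1/llavaphi-2.7b-pretrain/config.json") as f:
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]  # 2560 // 32 = 80
rotary_dim = int(head_dim * cfg["partial_rotary_factor"])    # 80 * 0.4 = 32
print(f"projector: {cfg['mm_hidden_size']} -> {cfg['hidden_size']} ({cfg['image_projector_type']})")
print(f"head_dim={head_dim}, rotary_dim={rotary_dim}")
```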
Stage1/llavaphi-2.7b-pretrain/mm_projector.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b175aec5a7168d0477a868d7aae27fea64d072204f0274e55aaa74845cf868d
+ size 18362045
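Once fetched through LFS, mm_projector.bin is a plain PyTorch state dict holding the Stage-1-trained adapter, and can be inspected without loading the full model. A hedged sketch (the exact key names depend on how the TinyLLaVABench trainer saved them and are not confirmed by this commit):

```python
import torch

# map_location avoids needing a GPU just to inspect the tensors
state = torch.load("Stage1/llavaphi-2.7b-pretrain/mm_projector.bin", map_location="cpu")
for name, tensor in state.items():
    print(name, tuple(tensor.shape), tensor.dtype)
# For an mlp2x_gelu projector we would expect two Linear layers:
# roughly (2560, 1024) + (2560,) and (2560, 2560) + (2560,) in float16.
```

The pointer's 18,362,045-byte size is consistent with exactly those two fp16 layers: about 9.18 M parameters at 2 bytes each, plus serialization overhead.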
Stage1/llavaphi-2.7b-pretrain/runs/Mar23_07-36-03_r750xa/events.out.tfevents.1711179389.r750xa ADDED
Binary file (4.04 kB)
 
Stage1/llavaphi-2.7b-pretrain/runs/Mar23_07-37-06_r750xa/events.out.tfevents.1711179453.r750xa ADDED
Binary file (5.65 kB)
 
Stage1/llavaphi-2.7b-pretrain/runs/Mar23_07-48-53_r750xa/events.out.tfevents.1711180167.r750xa ADDED
Binary file (4.04 kB)
 
Stage1/llavaphi-2.7b-pretrain/runs/Mar23_07-50-29_r750xa/events.out.tfevents.1711180259.r750xa ADDED
Binary file (49.7 kB)
 
Stage1/llavaphi-2.7b-pretrain/runs/Mar23_09-07-31_r750xa/events.out.tfevents.1711184883.r750xa ADDED
Binary file (237 kB)
 
Stage1/llavaphi-2.7b-pretrain/runs/Mar23_12-41-12_r750xa/events.out.tfevents.1711197698.r750xa ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c58e5c15cd9e173a9250676fae8fecdfd4daa49807995610617415fa4cd16241
+ size 2294058
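The events.out.tfevents.* files are standard TensorBoard logs from the pretraining runs, so the Stage-1 training curves can be read programmatically as well as in the TensorBoard UI. A sketch using TensorBoard's event reader (the scalar tag 'train/loss' is the usual HF Trainer tag, assumed here rather than verified against these files):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("Stage1/llavaphi-2.7b-pretrain/runs/Mar23_12-41-12_r750xa")
acc.Reload()                     # parse the tfevents file in that directory
print(acc.Tags()["scalars"])     # list the scalar tags actually logged
for event in acc.Scalars("train/loss"):  # tag name is an assumption
    print(event.step, event.value)
```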
Stage1/llavastablelm-1.6b-pretrain/config.json ADDED
@@ -0,0 +1,47 @@
+ {
+ "_name_or_path": "/home/songtaojiang/TinyLLaVABench/stablelm-2-1_6b",
+ "architectures": [
+ "StableLmForCausalLM"
+ ],
+ "bos_token_id": 100257,
+ "eos_token_id": 100257,
+ "freeze_mm_mlp_adapter": false,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "image_aspect_ratio": "square",
+ "image_projector_type": "mlp2x_gelu",
+ "initializer_range": 0.02,
+ "intermediate_size": 5632,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 4096,
+ "mm_hidden_size": 1024,
+ "mm_image_tower": "/home/songtaojiang/MobileVLM/clip-vit-large-patch14-336",
+ "mm_projector_lr": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_video_tower": null,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "model_type": "llava_stablelm",
+ "norm_eps": 1e-05,
+ "num_attention_heads": 32,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 32,
+ "pad_token_id": 100280,
+ "partial_rotary_factor": 0.25,
+ "rope_pct": 0.25,
+ "rope_theta": 10000,
+ "tie_word_embeddings": false,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "float16",
+ "transformers_version": "4.39.2",
+ "tune_mm_mlp_adapter": true,
+ "use_cache": true,
+ "use_mm_proj": true,
+ "use_qkv_bias": true,
+ "video_global_proj": false,
+ "video_projector_type": "linear",
+ "video_spatial_proj": false,
+ "video_temproal_proj": false,
+ "vocab_size": 100352
+ }
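Apart from the backbone (Phi-2 vs. StableLM-2-1.6B), the two Stage-1 configs share the same vision setup: the same CLIP ViT-L/14-336 image tower, the mlp2x_gelu projector, and tune_mm_mlp_adapter: true, which in the usual LLaVA recipe means only the projector is trained at this stage. A quick sketch to diff the two files field by field (paths relative to the repo root):

```python
import json

paths = {
    "phi-2.7b": "Stage1/llavaphi-2.7b-pretrain/config.json",
    "stablelm-1.6b": "Stage1/llavastablelm-1.6b-pretrain/config.json",
}
cfgs = {name: json.load(open(p)) for name, p in paths.items()}

# Print only the fields present in both configs that differ in value
shared = set(cfgs["phi-2.7b"]) & set(cfgs["stablelm-1.6b"])
for key in sorted(shared):
    a, b = cfgs["phi-2.7b"][key], cfgs["stablelm-1.6b"][key]
    if a != b:
        print(f"{key}: {a!r} vs {b!r}")
```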
Stage1/llavastablelm-1.6b-pretrain/mm_projector.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7b5ccec7fc405e95d464ae99c61dda98f0e3e22d1ae421ca383877c4d37a830
+ size 12592829
Stage1/llavastablelm-1.6b-pretrain/runs/Apr05_14-33-25_r750xa/events.out.tfevents.1712327628.r750xa ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab281e4bf8a2c980475f899ef0359d6f09c270ffb94f0bf640d9036b2828c1f4
+ size 3081128