emon-j committed on
Commit
e011873
1 Parent(s): 1a43e59

int4 quantized LLaVA multimodal

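The commit message says the LLaVA language model was quantized to int4 before export; the token-embedding IR at the bottom of this diff still carries FP16 constants, so the compression presumably targets the transformer's MatMul weights rather than the embedding table. A minimal sketch of how such an int4 OpenVINO IR is typically produced with NNCF's weight compression — the exact mode, group size, and ratio used for this commit are assumptions:

```python
# A minimal sketch, assuming NNCF >= 2.7 / OpenVINO 2023.2.
# The actual compression parameters used for this commit are not recorded here.
import nncf
import openvino as ov

core = ov.Core()
model = core.read_model("llava_with_past.xml")  # FP16 IR exported from PyTorch

# Compress weight constants to int4 (group-wise, symmetric); `ratio` keeps a
# fraction of the most sensitive layers at int8 instead of int4.
compressed = nncf.compress_weights(
    model,
    mode=nncf.CompressWeightsMode.INT4_SYM,
    group_size=128,  # assumed value
    ratio=0.8,       # assumed value
)
ov.save_model(compressed, "llava_with_past_int4.xml")
```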
config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_name_or_path": "liuhaotian/LLaVA-Lightning-MPT-7B-preview",
+   "architectures": [
+     "LlavaMPTForCausalLM"
+   ],
+   "attn_config": {
+     "alibi": true,
+     "alibi_bias_max": 8,
+     "attn_impl": "torch",
+     "attn_pdrop": 0,
+     "attn_type": "multihead_attention",
+     "attn_uses_sequence_id": false,
+     "clip_qkv": null,
+     "prefix_lm": false,
+     "qk_ln": false,
+     "softmax_scale": null
+   },
+   "d_model": 4096,
+   "emb_pdrop": 0,
+   "embedding_fraction": 1.0,
+   "expansion_ratio": 4,
+   "freeze_mm_mlp_adapter": false,
+   "hidden_size": 4096,
+   "init_config": {
+     "emb_init_std": null,
+     "emb_init_uniform_lim": null,
+     "fan_mode": "fan_in",
+     "init_div_is_residual": true,
+     "init_gain": 0,
+     "init_nonlinearity": "relu",
+     "init_std": 0.02,
+     "name": "kaiming_normal_",
+     "verbose": 0
+   },
+   "init_device": "cpu",
+   "learned_pos_emb": true,
+   "logit_scale": null,
+   "max_seq_len": 2048,
+   "mm_hidden_size": 1024,
+   "mm_use_im_start_end": true,
+   "mm_vision_select_layer": -2,
+   "mm_vision_tower": "openai/clip-vit-large-patch14",
+   "model_type": "llava_mpt",
+   "n_heads": 32,
+   "n_layers": 32,
+   "no_bias": true,
+   "norm_type": "low_precision_layernorm",
+   "resid_pdrop": 0,
+   "sep_image_conv_front": false,
+   "tokenizer_name": "sam-mosaic/gpt-neox-20b-chatml",
+   "torch_dtype": "float16",
+   "transformers_version": "4.34.1",
+   "tune_mm_mlp_adapter": false,
+   "use_cache": true,
+   "use_mm_proj": true,
+   "verbose": 0,
+   "vocab_size": 50282
+ }
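As a sanity check, the config's dimensions line up with the artifact sizes below: the token-embedding table is vocab_size × d_model FP16 values, which accounts for almost all of token_embed.bin. A short self-contained sketch (file names as in this commit):

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

vocab_size, d_model = cfg["vocab_size"], cfg["d_model"]  # 50282, 4096
fp16_bytes = 2

# 50282 * 4096 * 2 = 411,910,144 bytes -- exactly the size of the FP16 Const
# in token_embed.xml; token_embed.bin adds the 4-byte i32 gather-axis constant
# on top, giving the 411,910,148 bytes reported in its LFS pointer.
print(vocab_size * d_model * fp16_bytes)  # 411910144
```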
image_encoder.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b074be38b0a337d021c092006ccd28d2554b388a6c13b3953f6760dcbf357d4
+ size 296606914
image_encoder.xml ADDED
The diff for this file is too large to render. See raw diff
 
llava_input_embed.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06addafc07cebdfccef95bb6d87a841f398b44d5df70b2d33a7e4e3049d5524d
+ size 4223753918
llava_input_embed.xml ADDED
The diff for this file is too large to render. See raw diff
 
llava_with_past.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c176af2f281e64725de369f710a78ed63cd660af12922a4db1199e523f531d4c
+ size 4231790254
llava_with_past.xml ADDED
The diff for this file is too large to render. See raw diff
 
token_embed.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1203d0339c74402b0554f67768e20b5ec8274f647ff158cdca9083041907ad2
+ size 411910148
token_embed.xml ADDED
@@ -0,0 +1,107 @@
+ <?xml version="1.0"?>
+ <net name="Model3" version="11">
+ 	<layers>
+ 		<layer id="0" name="x" type="Parameter" version="opset1">
+ 			<data shape="?,?" element_type="i64" />
+ 			<output>
+ 				<port id="0" precision="I64" names="x">
+ 					<dim>-1</dim>
+ 					<dim>-1</dim>
+ 				</port>
+ 			</output>
+ 		</layer>
+ 		<layer id="1" name="self.transformer.wte.weight_compressed" type="Const" version="opset1">
+ 			<data element_type="f16" shape="50282, 4096" offset="0" size="411910144" />
+ 			<output>
+ 				<port id="0" precision="FP16" names="4,self.transformer.wte.weight">
+ 					<dim>50282</dim>
+ 					<dim>4096</dim>
+ 				</port>
+ 			</output>
+ 		</layer>
+ 		<layer id="2" name="self.transformer.wte.weight" type="Convert" version="opset1">
+ 			<data destination_type="f32" />
+ 			<rt_info>
+ 				<attribute name="decompression" version="0" />
+ 			</rt_info>
+ 			<input>
+ 				<port id="0" precision="FP16">
+ 					<dim>50282</dim>
+ 					<dim>4096</dim>
+ 				</port>
+ 			</input>
+ 			<output>
+ 				<port id="1" precision="FP32">
+ 					<dim>50282</dim>
+ 					<dim>4096</dim>
+ 				</port>
+ 			</output>
+ 		</layer>
+ 		<layer id="3" name="__module.transformer.wte/aten::embedding/Convert" type="Convert" version="opset1">
+ 			<data destination_type="i32" />
+ 			<input>
+ 				<port id="0" precision="I64">
+ 					<dim>-1</dim>
+ 					<dim>-1</dim>
+ 				</port>
+ 			</input>
+ 			<output>
+ 				<port id="1" precision="I32">
+ 					<dim>-1</dim>
+ 					<dim>-1</dim>
+ 				</port>
+ 			</output>
+ 		</layer>
+ 		<layer id="4" name="__module.transformer.wte/aten::embedding/Constant" type="Const" version="opset1">
+ 			<data element_type="i32" shape="" offset="411910144" size="4" />
+ 			<output>
+ 				<port id="0" precision="I32" />
+ 			</output>
+ 		</layer>
+ 		<layer id="5" name="__module.transformer.wte/aten::embedding/Gather" type="Gather" version="opset8">
+ 			<data batch_dims="0" />
+ 			<input>
+ 				<port id="0" precision="FP32">
+ 					<dim>50282</dim>
+ 					<dim>4096</dim>
+ 				</port>
+ 				<port id="1" precision="I32">
+ 					<dim>-1</dim>
+ 					<dim>-1</dim>
+ 				</port>
+ 				<port id="2" precision="I32" />
+ 			</input>
+ 			<output>
+ 				<port id="3" precision="FP32">
+ 					<dim>-1</dim>
+ 					<dim>-1</dim>
+ 					<dim>4096</dim>
+ 				</port>
+ 			</output>
+ 		</layer>
+ 		<layer id="6" name="Result_48770" type="Result" version="opset1">
+ 			<input>
+ 				<port id="0" precision="FP32">
+ 					<dim>-1</dim>
+ 					<dim>-1</dim>
+ 					<dim>4096</dim>
+ 				</port>
+ 			</input>
+ 		</layer>
+ 	</layers>
+ 	<edges>
+ 		<edge from-layer="0" from-port="0" to-layer="3" to-port="0" />
+ 		<edge from-layer="1" from-port="0" to-layer="2" to-port="0" />
+ 		<edge from-layer="2" from-port="1" to-layer="5" to-port="0" />
+ 		<edge from-layer="3" from-port="1" to-layer="5" to-port="1" />
+ 		<edge from-layer="4" from-port="0" to-layer="5" to-port="2" />
+ 		<edge from-layer="5" from-port="3" to-layer="6" to-port="0" />
+ 	</edges>
+ 	<rt_info>
+ 		<Runtime_version value="2023.2.0-13105-ff7b49c14d1-HEAD" />
+ 		<conversion_parameters>
+ 			<framework value="pytorch" />
+ 			<is_python_object value="True" />
+ 		</conversion_parameters>
+ 	</rt_info>
+ </net>
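From the graph above, token_embed is a standalone embedding lookup: an i64 token-id tensor of shape [batch, seq] is converted to i32 and gathered against the 50282×4096 FP16 table (decompressed to FP32), producing FP32 embeddings of shape [batch, seq, 4096]. A minimal sketch of running it with the OpenVINO runtime — device choice and the example token ids are assumptions:

```python
# A minimal sketch, assuming OpenVINO 2023.2 with token_embed.xml/.bin
# side by side in the working directory.
import numpy as np
import openvino as ov

core = ov.Core()
compiled = core.compile_model("token_embed.xml", "CPU")  # device is an assumption

token_ids = np.array([[15496, 995]], dtype=np.int64)  # [batch=1, seq=2], example ids
result = compiled(token_ids)

embeds = result[compiled.output(0)]
print(embeds.shape, embeds.dtype)  # (1, 2, 4096) float32
```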