Fudan-FUXI committed
Commit d1fa6ec
1 Parent(s): e499a98

Upload 4 files

adapter_config.json ADDED
@@ -0,0 +1,151 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 256,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 128,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "35.self_attn.k_proj",
+     "model.layers.4.self_attn.q_proj",
+     "27.self_attn.k_proj",
+     "model.layers.25.self_attn.q_proj",
+     "model.layers.10.self_attn.v_proj",
+     "38.self_attn.v_proj",
+     "model.layers.6.self_attn.k_proj",
+     "model.layers.6.self_attn.v_proj",
+     "30.self_attn.v_proj",
+     "model.layers.18.self_attn.k_proj",
+     "model.layers.13.self_attn.q_proj",
+     "model.layers.11.self_attn.q_proj",
+     "37.self_attn.q_proj",
+     "model.layers.19.self_attn.k_proj",
+     "model.layers.12.self_attn.k_proj",
+     "model.layers.26.self_attn.q_proj",
+     "model.layers.22.self_attn.q_proj",
+     "model.layers.12.self_attn.q_proj",
+     "model.layers.11.self_attn.k_proj",
+     "33.self_attn.v_proj",
+     "model.layers.13.self_attn.k_proj",
+     "model.layers.13.self_attn.v_proj",
+     "model.layers.18.self_attn.v_proj",
+     "31.self_attn.v_proj",
+     "29.self_attn.k_proj",
+     "38.self_attn.q_proj",
+     "model.layers.25.self_attn.v_proj",
+     "model.layers.7.self_attn.v_proj",
+     "model.layers.20.self_attn.q_proj",
+     "model.layers.1.self_attn.k_proj",
+     "39.self_attn.k_proj",
+     "36.self_attn.k_proj",
+     "model.layers.10.self_attn.q_proj",
+     "28.self_attn.k_proj",
+     "27.self_attn.q_proj",
+     "model.layers.9.self_attn.q_proj",
+     "28.self_attn.q_proj",
+     "model.layers.6.self_attn.q_proj",
+     "34.self_attn.v_proj",
+     "model.layers.8.self_attn.k_proj",
+     "model.layers.25.self_attn.k_proj",
+     "model.layers.16.self_attn.k_proj",
+     "model.layers.7.self_attn.k_proj",
+     "31.self_attn.q_proj",
+     "28.self_attn.v_proj",
+     "model.layers.14.self_attn.k_proj",
+     "up_proj",
+     "model.layers.10.self_attn.k_proj",
+     "model.layers.19.self_attn.v_proj",
+     "model.layers.19.self_attn.q_proj",
+     "model.layers.21.self_attn.q_proj",
+     "model.layers.12.self_attn.v_proj",
+     "model.layers.23.self_attn.k_proj",
+     "39.self_attn.v_proj",
+     "model.layers.26.self_attn.v_proj",
+     "36.self_attn.v_proj",
+     "32.self_attn.k_proj",
+     "37.self_attn.k_proj",
+     "model.layers.0.self_attn.q_proj",
+     "model.layers.2.self_attn.q_proj",
+     "o_proj",
+     "30.self_attn.k_proj",
+     "29.self_attn.v_proj",
+     "model.layers.7.self_attn.q_proj",
+     "down_proj",
+     "model.layers.17.self_attn.k_proj",
+     "model.layers.15.self_attn.v_proj",
+     "38.self_attn.k_proj",
+     "35.self_attn.v_proj",
+     "model.layers.15.self_attn.q_proj",
+     "model.layers.23.self_attn.q_proj",
+     "model.layers.21.self_attn.k_proj",
+     "model.layers.4.self_attn.k_proj",
+     "model.layers.17.self_attn.v_proj",
+     "33.self_attn.k_proj",
+     "model.layers.22.self_attn.k_proj",
+     "34.self_attn.k_proj",
+     "model.layers.5.self_attn.k_proj",
+     "32.self_attn.q_proj",
+     "model.layers.17.self_attn.q_proj",
+     "model.layers.22.self_attn.v_proj",
+     "model.layers.2.self_attn.v_proj",
+     "model.layers.20.self_attn.v_proj",
+     "model.layers.23.self_attn.v_proj",
+     "30.self_attn.q_proj",
+     "model.layers.1.self_attn.q_proj",
+     "31.self_attn.k_proj",
+     "32.self_attn.v_proj",
+     "model.layers.14.self_attn.q_proj",
+     "model.layers.1.self_attn.v_proj",
+     "model.layers.14.self_attn.v_proj",
+     "model.layers.2.self_attn.k_proj",
+     "model.layers.5.self_attn.q_proj",
+     "model.layers.5.self_attn.v_proj",
+     "model.layers.24.self_attn.k_proj",
+     "27.self_attn.v_proj",
+     "model.layers.8.self_attn.q_proj",
+     "model.layers.21.self_attn.v_proj",
+     "37.self_attn.v_proj",
+     "29.self_attn.q_proj",
+     "model.layers.11.self_attn.v_proj",
+     "model.layers.9.self_attn.k_proj",
+     "model.layers.18.self_attn.q_proj",
+     "model.layers.0.self_attn.v_proj",
+     "39.self_attn.q_proj",
+     "33.self_attn.q_proj",
+     "model.layers.16.self_attn.q_proj",
+     "model.layers.0.self_attn.k_proj",
+     "model.layers.26.self_attn.k_proj",
+     "36.self_attn.q_proj",
+     "34.self_attn.q_proj",
+     "model.layers.9.self_attn.v_proj",
+     "model.layers.24.self_attn.q_proj",
+     "model.layers.3.self_attn.q_proj",
+     "model.layers.20.self_attn.k_proj",
+     "35.self_attn.q_proj",
+     "model.layers.3.self_attn.v_proj",
+     "model.layers.16.self_attn.v_proj",
+     "model.layers.3.self_attn.k_proj",
+     "model.layers.15.self_attn.k_proj",
+     "model.layers.8.self_attn.v_proj",
+     "gate_proj",
+     "model.layers.24.self_attn.v_proj",
+     "model.layers.4.self_attn.v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
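
This is a PEFT LoRA adapter over the attention q/k/v projections across all 40 LLM layers (plus o_proj and the MLP gate/up/down projections), with r=128 and lora_alpha=256, i.e. an effective update scaling of alpha/r = 2. A minimal sketch of attaching such an adapter with the peft library follows; both paths are placeholders, since base_model_name_or_path is empty in this config and the real base here is a VILA LlavaLlamaModel rather than a stock Hugging Face class:

from peft import PeftModel
from transformers import AutoModelForCausalLM

# Placeholder paths: the config does not record the base model, and loading
# the actual VILA multimodal stack requires the VILA codebase.
base = AutoModelForCausalLM.from_pretrained("path/to/base-model")
# Reads adapter_config.json and adapter_model.safetensors from the adapter dir.
model = PeftModel.from_pretrained(base, "path/to/this/adapter")
model.eval()  # consistent with "inference_mode": true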
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c751b388d0af63dc7e757fc16bfb817a26fd4a33ef522f2b668f381a85173df
+ size 1001469184
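
The committed file is a Git LFS pointer, not the weights themselves: the ~1.0 GB safetensors blob is fetched on checkout or via huggingface_hub. A minimal sketch (filename assumed to match the entry above) for checking a downloaded copy against the recorded sha256:

import hashlib

expected = "8c751b388d0af63dc7e757fc16bfb817a26fd4a33ef522f2b668f381a85173df"
h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    # Stream in 1 MiB chunks; the file is about 1 GB, so don't read it whole.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected, "download is corrupt or incomplete"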
config.json ADDED
@@ -0,0 +1,254 @@
+ {
+   "_name_or_path": "./checkpoints/24-1107-vila-v1.5-13b-lora-r128-alpha256-lr-1e5-hdvila1000+human_feedback_multi_level-25epoch-release",
+   "architectures": [
+     "LlavaLlamaModel"
+   ],
+   "drop_path_rate": 0.0,
+   "fps": 0.0,
+   "hidden_size": 5120,
+   "image_aspect_ratio": "resize",
+   "interpolate_mode": "linear",
+   "llm_cfg": {
+     "_name_or_path": "./llm",
+     "add_cross_attention": false,
+     "architectures": [
+       "LlamaForCausalLM"
+     ],
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 1,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "silu",
+     "hidden_size": 5120,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 13824,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 4096,
+     "max_position_embeddings": 4096,
+     "min_length": 0,
+     "model_max_length": 4096,
+     "model_type": "llama",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 40,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 40,
+     "num_key_value_heads": 40,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "prefix": null,
+     "pretraining_tp": 1,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "rms_norm_eps": 1e-05,
+     "rope_scaling": null,
+     "rope_theta": 10000.0,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": false,
+     "tokenizer_class": null,
+     "tokenizer_model_max_length": 4096,
+     "tokenizer_padding_side": "right",
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 32000
+   },
+   "mm_hidden_size": 1152,
+   "mm_projector_cfg": {
+     "_name_or_path": "./mm_projector",
+     "add_cross_attention": false,
+     "architectures": [
+       "MultimodalProjector"
+     ],
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "mm_projector_type": "mlp_downsample",
+     "model_type": "v2l_projector",
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "mm_projector_lr": null,
+   "mm_use_im_patch_token": false,
+   "mm_use_im_start_end": false,
+   "mm_vision_select_feature": "cls_patch",
+   "mm_vision_select_layer": -2,
+   "model_dtype": "torch.bfloat16",
+   "model_type": "llava_llama",
+   "num_video_frames": 8,
+   "resume_path": "./checkpoints/24-1107-vila-v1.5-13b-lora-r128-alpha256-lr-1e5-hdvila1000+human_feedback_multi_level-25epoch-release",
+   "s2": false,
+   "s2_max_split_size": 336,
+   "s2_scales": "336,672,1008",
+   "transformers_version": "4.37.2",
+   "tune_language_model": false,
+   "tune_mm_projector": false,
+   "tune_vision_tower": false,
+   "vision_resolution": -1,
+   "vision_tower_cfg": {
+     "_name_or_path": "./vision_tower",
+     "add_cross_attention": false,
+     "architectures": [
+       "SiglipVisionModel"
+     ],
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1152,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 384,
+     "intermediate_size": 4304,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-06,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "siglip_vision_model",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 27,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   }
+ }
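
This config describes a VILA-1.5-13B LLaVA-style stack: a 40-layer Llama LLM (hidden_size 5120), a SigLIP vision tower at 384 px resolution with patch size 14, and an "mlp_downsample" projector mapping the 1152-dim vision features into the LLM, with 8 frames sampled per video. Because "llava_llama" is a VILA-specific model_type rather than one registered in stock transformers 4.37.2, a safe way to inspect the file is plain JSON; a minimal sketch, assuming config.json sits in the working directory:

import json

# Parse as plain JSON rather than via transformers.AutoConfig, since the
# custom "llava_llama" model_type needs the VILA codebase to resolve.
with open("config.json") as f:
    cfg = json.load(f)

print(cfg["model_type"])                      # llava_llama
print(cfg["llm_cfg"]["num_hidden_layers"])    # 40 (13B-class Llama)
print(cfg["vision_tower_cfg"]["image_size"])  # 384 (SigLIP input resolution)
print(cfg["num_video_frames"])                # 8 frames per video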
non_lora_trainables.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60fb82c3660319e6d0b239950b20c28181e97f1ade117dc0660b40e2ad94a89b
+ size 912
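
In LLaVA/VILA-style LoRA checkpoints, non_lora_trainables.bin conventionally holds whatever trained parameters fall outside the LoRA adapter itself; at 912 bytes, this one can only contain a handful of tiny tensors or a near-empty state dict. A minimal sketch for listing its contents (filename as committed; PyTorch assumed available):

import torch

# Load onto CPU and enumerate whatever the pickle holds.
state = torch.load("non_lora_trainables.bin", map_location="cpu")
for name, value in state.items():
    shape = tuple(value.shape) if hasattr(value, "shape") else type(value)
    print(name, shape)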