koolkittykat committed
Commit
e3db6e5
1 Parent(s): d99047c

Upload checkpoint trained from Gemma 1.1 2B

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "google/gemma-2b-it",
+  "_name_or_path": "google/gemma-1.1-2b-it",
   "architectures": [
     "LlavaGemmaForCausalLM"
   ],
@@ -9,8 +9,8 @@
   "eos_token_id": 1,
   "freeze_mm_mlp_adapter": false,
   "head_dim": 256,
-  "hidden_act": "gelu",
-  "hidden_activation": null,
+  "hidden_act": "gelu_pytorch_tanh",
+  "hidden_activation": "gelu_pytorch_tanh",
   "hidden_size": 2048,
   "image_aspect_ratio": "pad",
   "initializer_range": 0.02,
@@ -31,12 +31,11 @@
   "num_key_value_heads": 1,
   "pad_token_id": 0,
   "rms_norm_eps": 1e-06,
-  "rope_scaling": null,
   "rope_theta": 10000.0,
-  "tokenizer_model_max_length": 3072,
+  "tokenizer_model_max_length": 4096,
   "tokenizer_padding_side": "right",
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.40.1",
+  "transformers_version": "4.41.0.dev0",
   "tune_mm_mlp_adapter": false,
   "use_cache": true,
   "use_mm_proj": true,
generation_config.json CHANGED
@@ -3,5 +3,5 @@
   "bos_token_id": 2,
   "eos_token_id": 1,
   "pad_token_id": 0,
-  "transformers_version": "4.40.1"
+  "transformers_version": "4.41.0.dev0"
 }
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c4a879cdd3b2fc6fbfbd151e222acfc7e1d8d845aaff3876a91679e1258b0697
+oid sha256:862efa4d98e8d07baf1e5f1ce46c1d24d953445fe6263bfa56578990f54c5657
 size 4945242264
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66251e77fab9a5bcd12fcbd136a6da22d91cc5055fb5e16b8a0861bcf0ead821
+oid sha256:005624fb8471e7e9b59252d5c7ac9bb64797b5d72205517000d2dc939aecac2d
 size 1274422600
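Both weight shards keep their byte sizes; only the Git LFS object IDs change. A downloaded shard can be checked against its pointer by hashing it locally; a minimal sketch, assuming the shard sits in the current directory:

```python
import hashlib

# Minimal sketch: recompute a shard's sha256 and compare it to the
# "oid sha256:..." recorded in its Git LFS pointer above.
def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("model-00001-of-00002.safetensors")
assert digest == "862efa4d98e8d07baf1e5f1ce46c1d24d953445fe6263bfa56578990f54c5657"
```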
tokenizer_config.json CHANGED
@@ -1507,7 +1507,7 @@
   "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<eos>",
-  "model_max_length": 3072,
+  "model_max_length": 4096,
   "pad_token": "<unk>",
   "padding_side": "right",
   "sp_model_kwargs": {},
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1389e5c1499046ef35ff1d499574c6fcb54dfb22378fc45019fc691227d15ffe
-size 6648
+oid sha256:496f2c2ad43e62b2e6084575d2233fe9782bed50acb223e6f68714bfbe1cbdac
+size 6776