0x0dad0 committed
Commit 716a9cb
1 Parent(s): f21f704

Upload GemmaForCausalLM

config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_name_or_path": "gemcy_v2",
   "architectures": [
-    "LlamaForCausalLM"
+    "GemmaForCausalLM"
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
@@ -13,16 +13,14 @@
   "initializer_range": 0.02,
   "intermediate_size": 16384,
   "max_position_embeddings": 8192,
-  "model_type": "llama",
+  "model_type": "gemma",
   "num_attention_heads": 8,
   "num_hidden_layers": 18,
   "num_key_value_heads": 1,
   "pad_token_id": 0,
-  "pretraining_tp": 1,
   "rms_norm_eps": 1e-06,
   "rope_scaling": null,
   "rope_theta": 10000.0,
-  "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.38.1",
   "use_cache": false,
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:90fb45f150b310fe562a67279e12837b07083205c48783c75119a00eadadd0ca
-size 1115697720
+oid sha256:01eedac83e4049f0f25a2eab432c9acba2926a73da853cd4987ff9043a23e3ce
+size 67121608
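The updated LFS pointer references a much smaller second shard (roughly 67 MB instead of roughly 1.1 GB), consistent with the output head no longer being stored as a separate tensor. A minimal sketch to list what remains in that shard, assuming the file has been downloaded locally:

from safetensors import safe_open

# Assumes the shard file is present in the current directory.
with safe_open("model-00002-of-00002.safetensors", framework="pt") as f:
    for name in f.keys():
        print(name, f.get_slice(name).get_shape())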
model.safetensors.index.json CHANGED
@@ -1,9 +1,8 @@
 {
   "metadata": {
-    "total_size": 6060920832
+    "total_size": 5012344832
   },
   "weight_map": {
-    "lm_head.weight": "model-00002-of-00002.safetensors",
     "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
     "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",