erfanzar committed
Commit
5659e2a
1 Parent(s): dcd4c53

Upload LlamaForCausalLM

Files changed (3)
  1. config.json +103 -0
  2. generation_config.json +6 -0
  3. model.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,103 @@
+ {
+   "_name_or_path": "meta-llama/Llama-2-7b-hf",
+   "a_ps": [
+     [
+       "dp",
+       "fsdp"
+     ],
+     "sp",
+     "tp",
+     null
+   ],
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "axis_dims": [
+     1,
+     -1,
+     1,
+     1
+   ],
+   "axis_names": [
+     "dp",
+     "fsdp",
+     "tp",
+     "sp"
+   ],
+   "b_ps": [
+     [
+       "dp",
+       "fsdp"
+     ],
+     null,
+     null,
+     null
+   ],
+   "backend": null,
+   "bits": null,
+   "bos_token_id": 1,
+   "c_max_position_embeddings": 2048,
+   "easy_method": "train",
+   "embd_pdrop": 0.0,
+   "eos_token_id": 2,
+   "fcm_max_ratio": 0.0,
+   "fcm_min_ratio": 0.0,
+   "flash_attn_key_chunk_size": 1024,
+   "flash_attn_query_chunk_size": 1024,
+   "freq_max_position_embeddings": 2048,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5632,
+   "k_ps": [
+     [
+       "dp",
+       "fsdp"
+     ],
+     "sp",
+     "tp",
+     null
+   ],
+   "max_position_embeddings": 2048,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 22,
+   "num_key_value_heads": 4,
+   "number_rep_kv": 1,
+   "pretraining_tp": 1,
+   "q_ps": [
+     [
+       "dp",
+       "fsdp"
+     ],
+     "sp",
+     "tp",
+     null
+   ],
+   "resid_pdrop": 0.0,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "scan_layers": true,
+   "scan_mlp_chunk_size": 1024,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.36.0",
+   "use_cache": true,
+   "use_flash_attention": false,
+   "use_pjit_attention_force": false,
+   "use_sacn_mlp": false,
+   "use_shard_map": false,
+   "v_ps": [
+     [
+       "dp",
+       "fsdp"
+     ],
+     "sp",
+     "tp",
+     null
+   ],
+   "vocab_size": 32000
+ }
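
For context, a minimal sketch (not part of this commit) of loading this checkpoint with the Hugging Face transformers API. "./ckpt" is a placeholder for a local copy of the files above; the partition-spec keys (a_ps, b_ps, k_ps, q_ps, v_ps, axis_dims, axis_names) appear to be JAX sharding hints, and plain transformers simply carries them along as extra config attributes.

# Minimal sketch, assuming the three files above sit in a local directory "./ckpt".
import torch
from transformers import AutoConfig, LlamaForCausalLM

config = AutoConfig.from_pretrained("./ckpt")
print(config.hidden_size, config.num_hidden_layers, config.num_key_value_heads)
# 2048 22 4

# Weights are stored in float16, matching "torch_dtype" in config.json.
model = LlamaForCausalLM.from_pretrained("./ckpt", torch_dtype=torch.float16)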
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.36.0"
+ }
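
The generation defaults above can also be read on their own; a minimal sketch, again using the placeholder "./ckpt" path:

from transformers import GenerationConfig

# Loads generation_config.json; the bos/eos ids match the values in config.json.
gen_config = GenerationConfig.from_pretrained("./ckpt")
print(gen_config.bos_token_id, gen_config.eos_token_id)
# 1 2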
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac659dbd72708db677184addb953840a37bb70fbd94b364461bee5de70b7b6be
+ size 2200119664
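
The model.safetensors entry above is a Git LFS pointer, not the weights themselves; the actual file is fetched on clone or download. A minimal sketch (a hypothetical helper, not part of the repo) for checking a downloaded file against the oid and size recorded in the pointer:

import hashlib

EXPECTED_SHA256 = "ac659dbd72708db677184addb953840a37bb70fbd94b364461bee5de70b7b6be"
EXPECTED_SIZE = 2200119664  # bytes, roughly 2.2 GB

def verify(path: str) -> bool:
    # Stream the file in 1 MiB chunks so the full 2.2 GB is never held in memory.
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == EXPECTED_SHA256 and size == EXPECTED_SIZE

print(verify("model.safetensors"))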