MrD0502 committed
Commit
59b2e96
1 Parent(s): 16eff38
config.json ADDED
@@ -0,0 +1,96 @@
+ {
+   "_name_or_path": "EleutherAI/pythia-1.3b-deduped",
+   "architectures": [
+     "GPTNeoXForCausalLM"
+   ],
+   "bad_words_ids": [
+     [
+       434,
+       15694,
+       66,
+       27,
+       209
+     ],
+     [
+       15362
+     ],
+     [
+       1713
+     ],
+     [
+       1713,
+       64
+     ],
+     [
+       1713,
+       876
+     ],
+     [
+       2016,
+       251,
+       857,
+       75,
+       9194,
+       35478
+     ],
+     [
+       2391
+     ],
+     [
+       20340
+     ],
+     [
+       33021
+     ],
+     [
+       2391,
+       1051
+     ],
+     [
+       5638
+     ],
+     [
+       2391,
+       20340
+     ],
+     [
+       5638,
+       537
+     ],
+     [
+       1559,
+       2345
+     ],
+     [
+       1559,
+       7849
+     ],
+     [
+       1559,
+       17379
+     ],
+     [
+       25321,
+       4611
+     ]
+   ],
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "hidden_act": "gelu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 8192,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 2048,
+   "model_type": "gpt_neox",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "rotary_emb_base": 10000,
+   "rotary_pct": 0.25,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.25.1",
+   "use_cache": true,
+   "use_parallel_residual": true,
+   "vocab_size": 50304
+ }
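
For reference, this is a standard `transformers` GPT-NeoX config; the notable addition over the upstream Pythia config is `bad_words_ids`, a list of token-id sequences that `generate()` refuses to emit. A minimal sketch of how the file is consumed, where `./checkpoint` is a placeholder for a local clone of this repo:

```python
from transformers import AutoTokenizer, GPTNeoXForCausalLM

# "./checkpoint" is a placeholder path for a local clone of this repo.
model = GPTNeoXForCausalLM.from_pretrained("./checkpoint")
tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

inputs = tokenizer("Hello, my name is", return_tensors="pt")

# bad_words_ids is passed explicitly here; in the 4.25-era transformers
# this config pins, generate() also falls back to the value stored on
# model.config. Each inner list is a token-id sequence that will never
# appear in the output.
outputs = model.generate(
    **inputs,
    max_new_tokens=32,
    bad_words_ids=model.config.bad_words_ids,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```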
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3228245a35970cafbebea523525daf888f8a04c433ca2e277883b0ad98da96c3
+ size 2930076797
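
`pytorch_model.bin` is tracked with Git LFS, so the commit stores only this pointer: the LFS spec version, the SHA-256 of the actual payload, and its size in bytes (about 2.9 GB). A sketch for verifying a downloaded copy against the pointer's `oid`:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 in 1 MiB chunks so the
    ~2.9 GB weights never have to fit in memory at once."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the LFS pointer above.
expected = "3228245a35970cafbebea523525daf888f8a04c433ca2e277883b0ad98da96c3"
assert sha256_of("pytorch_model.bin") == expected, "checksum mismatch"
```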
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "name_or_path": "EleutherAI/gpt-neox-20b",
+   "special_tokens_map_file": "/fsx/home-hailey/.cache/huggingface/hub/models--EleutherAI--gpt-neox-20b/snapshots/3523781c8df75f7741687a4284f6f70e1afa12f4/special_tokens_map.json",
+   "tokenizer_class": "GPTNeoXTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
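
The tokenizer files mirror `EleutherAI/gpt-neox-20b`, whose tokenizer the Pythia suite reuses; per `special_tokens_map.json` above, the `bos`, `eos`, and `unk` tokens all map to `<|endoftext|>`. A sketch of loading it, with `./checkpoint` again a placeholder for a local clone; `AutoTokenizer` resolves the `tokenizer_class` entry to the fast GPT-NeoX tokenizer backed by `tokenizer.json`:

```python
from transformers import AutoTokenizer

# "./checkpoint" is a placeholder path for a local clone of this repo.
tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

# All three special tokens resolve to <|endoftext|> (token id 0,
# matching bos_token_id/eos_token_id in config.json).
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)

ids = tokenizer("Hello world").input_ids
print(ids, "->", tokenizer.decode(ids))
```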