jinymusim committed on
Commit
f30571b
1 Parent(s): 3901994

Upload 9 files

Browse files

Updated Model with tweaked learning

added_tokens.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "<|system|>": 50257,
3
+ "<|user|>": 50258
4
+ }
config.json CHANGED
@@ -17,6 +17,7 @@
17
  "n_inner": null,
18
  "n_layer": 12,
19
  "n_positions": 1024,
 
20
  "reorder_and_upcast_attn": false,
21
  "resid_pdrop": 0.1,
22
  "scale_attn_by_inverse_layer_idx": false,
@@ -34,5 +35,5 @@
34
  "torch_dtype": "float32",
35
  "transformers_version": "4.27.4",
36
  "use_cache": true,
37
- "vocab_size": 50259
38
  }
 
17
  "n_inner": null,
18
  "n_layer": 12,
19
  "n_positions": 1024,
20
+ "pad_token_id": 50256,
21
  "reorder_and_upcast_attn": false,
22
  "resid_pdrop": 0.1,
23
  "scale_attn_by_inverse_layer_idx": false,
 
35
  "torch_dtype": "float32",
36
  "transformers_version": "4.27.4",
37
  "use_cache": true,
38
+ "vocab_size": 50260
39
  }
merges.txt CHANGED
@@ -1,4 +1,4 @@
1
- #version: 0.2 - Trained by `huggingface/tokenizers`
2
  Ġ t
3
  Ġ a
4
  h e
 
1
+ #version: 0.2
2
  Ġ t
3
  Ġ a
4
  h e
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cf8846671ee2ba122558d69fd6bf40367ced2426b43f92e733326dc8a7f4c994
3
- size 510404157
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff1e83353ecdb77d60b49eea066735ded0ed9703edeefde41a5b24036d0c0e39
3
+ size 510407229
special_tokens_map.json CHANGED
@@ -1,4 +1,9 @@
1
  {
 
 
 
 
 
2
  "bos_token": {
3
  "content": "<|endoftext|>",
4
  "lstrip": false,
@@ -13,6 +18,7 @@
13
  "rstrip": false,
14
  "single_word": false
15
  },
 
16
  "unk_token": {
17
  "content": "<|endoftext|>",
18
  "lstrip": false,
 
1
  {
2
+ "additional_special_tokens": [
3
+ "<|system|>",
4
+ "<|user|>",
5
+ "<|endoftext|>"
6
+ ],
7
  "bos_token": {
8
  "content": "<|endoftext|>",
9
  "lstrip": false,
 
18
  "rstrip": false,
19
  "single_word": false
20
  },
21
+ "pad_token": "<|endoftext|>",
22
  "unk_token": {
23
  "content": "<|endoftext|>",
24
  "lstrip": false,
tokenizer.json CHANGED
@@ -1,6 +1,11 @@
1
  {
2
  "version": "1.0",
3
- "truncation": null,
 
 
 
 
 
4
  "padding": null,
5
  "added_tokens": [
6
  {
@@ -9,7 +14,25 @@
9
  "single_word": false,
10
  "lstrip": false,
11
  "rstrip": false,
12
- "normalized": true,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  "special": true
14
  }
15
  ],
@@ -39,6 +62,7 @@
39
  "continuing_subword_prefix": "",
40
  "end_of_word_suffix": "",
41
  "fuse_unk": false,
 
42
  "vocab": {
43
  "!": 0,
44
  "\"": 1,
 
1
  {
2
  "version": "1.0",
3
+ "truncation": {
4
+ "direction": "Right",
5
+ "max_length": 1024,
6
+ "strategy": "LongestFirst",
7
+ "stride": 0
8
+ },
9
  "padding": null,
10
  "added_tokens": [
11
  {
 
14
  "single_word": false,
15
  "lstrip": false,
16
  "rstrip": false,
17
+ "normalized": false,
18
+ "special": true
19
+ },
20
+ {
21
+ "id": 50257,
22
+ "content": "<|system|>",
23
+ "single_word": false,
24
+ "lstrip": false,
25
+ "rstrip": false,
26
+ "normalized": false,
27
+ "special": true
28
+ },
29
+ {
30
+ "id": 50258,
31
+ "content": "<|user|>",
32
+ "single_word": false,
33
+ "lstrip": false,
34
+ "rstrip": false,
35
+ "normalized": false,
36
  "special": true
37
  }
38
  ],
 
62
  "continuing_subword_prefix": "",
63
  "end_of_word_suffix": "",
64
  "fuse_unk": false,
65
+ "byte_fallback": false,
66
  "vocab": {
67
  "!": 0,
68
  "\"": 1,