jinymusim committed
Commit 9bd79dd · 1 Parent(s): a4ee1e8

Upload 6 files

config.json CHANGED
@@ -37,7 +37,7 @@
     }
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.33.1",
+  "transformers_version": "4.34.1",
   "use_cache": true,
-  "vocab_size": 50257
+  "vocab_size": 24574
 }
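The notable change is the vocabulary shrinking from GPT-2's 50257 tokens to 24574, alongside the transformers version bump. A minimal sketch for checking the new values after downloading the repository (the path "." is a placeholder for a local clone; the repo id is not shown in this commit):

    from transformers import AutoConfig

    # "." is a placeholder for the locally cloned repository.
    config = AutoConfig.from_pretrained(".")
    print(config.vocab_size)            # 24574 after this commit (was 50257)
    print(config.transformers_version)  # "4.34.1"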
generation_config.json CHANGED
@@ -4,5 +4,5 @@
   "eos_token_id": 50256,
   "output_hidden_states": true,
   "pad_token_id": 0,
-  "transformers_version": "4.33.1"
+  "transformers_version": "4.34.1"
 }
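Only the transformers version changes here: eos_token_id stays at 50256, which now falls outside the 24574-token vocabulary set in config.json. A sketch for inspecting it (same placeholder path as above):

    from transformers import GenerationConfig

    gen_config = GenerationConfig.from_pretrained(".")  # placeholder path
    # Still 50256, i.e. GPT-2's <|endoftext|> id, even though the new
    # vocabulary has only 24574 entries and tokenizer_config.json maps
    # the new <|EOS|> token to id 0.
    print(gen_config.eos_token_id)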
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8371459fab01373807d193e942e02589f967e5f918df04db40fc6964b4ca5c37
-size 497807197
+oid sha256:b7c9074adefd7b49f237c345c309e01b7d699a023c9ba7a49edb6a205a1f6a90
+size 418909530
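The Git LFS pointer swaps in new weights roughly 79 MB smaller, consistent with the tied embedding matrix losing 25683 rows at 768 float32 dims (the 768 hidden size is an assumption based on the GPT-2-small-scale file size; the architecture itself is not shown in this diff). A standard-library sketch for verifying a downloaded pytorch_model.bin against the new pointer:

    import hashlib
    import os

    EXPECTED_SHA256 = "b7c9074adefd7b49f237c345c309e01b7d699a023c9ba7a49edb6a205a1f6a90"
    EXPECTED_SIZE = 418909530

    assert os.path.getsize("pytorch_model.bin") == EXPECTED_SIZE

    digest = hashlib.sha256()
    with open("pytorch_model.bin", "rb") as f:
        # Hash in 1 MiB chunks so the ~419 MB file is never fully in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    assert digest.hexdigest() == EXPECTED_SHA256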
special_tokens_map.json CHANGED
@@ -1,6 +1,5 @@
 {
-  "bos_token": "<|endoftext|>",
-  "eos_token": "<|endoftext|>",
-  "pad_token": "<|endoftext|>",
-  "unk_token": "<|endoftext|>"
+  "eos_token": "<|EOS|>",
+  "pad_token": "<|PAD|>",
+  "unk_token": "<|UNK|>"
 }
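After this commit, the single shared <|endoftext|> token gives way to dedicated <|EOS|>, <|PAD|>, and <|UNK|> tokens, and bos_token is dropped entirely. A sketch for confirming what the tokenizer reports (placeholder path again):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(".")  # placeholder path
    print(tokenizer.eos_token)  # "<|EOS|>"
    print(tokenizer.pad_token)  # "<|PAD|>"
    print(tokenizer.unk_token)  # "<|UNK|>"
    print(tokenizer.bos_token)  # None: no bos_token after this commit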
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,40 +1,34 @@
 {
-  "add_bos_token": false,
-  "add_prefix_space": false,
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<|EOS|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<|PAD|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<|UNK|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
   },
   "clean_up_tokenization_spaces": true,
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "errors": "replace",
+  "eos_token": "<|EOS|>",
   "model_max_length": 1024,
-  "pad_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "pad_token": "<|PAD|>",
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": "<|UNK|>"
 }
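The tokenizer class switches from GPT2Tokenizer to PreTrainedTokenizerFast, so loading now goes through the serialized tokenizer.json, and added_tokens_decoder pins the new special tokens to ids 0-2. A sketch checking both (placeholder path, as in the earlier examples):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(".")  # placeholder path
    print(type(tokenizer).__name__)  # PreTrainedTokenizerFast
    # added_tokens_decoder maps ids 0, 1, 2 to the new special tokens.
    print(tokenizer.convert_tokens_to_ids("<|EOS|>"))  # 0
    print(tokenizer.convert_tokens_to_ids("<|PAD|>"))  # 1
    print(tokenizer.convert_tokens_to_ids("<|UNK|>"))  # 2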