Ahmed007 committed
Commit 905790f
1 Parent(s): ae3fd52

gpt2-arabic-poet_v2

config.json CHANGED
@@ -5,15 +5,13 @@
    "GPT2LMHeadModel"
  ],
  "attn_pdrop": 0.1,
- "bos_token_id": 50256,
- "do_sample": true,
+ "bos_token_id": 0,
  "embd_pdrop": 0.1,
- "eos_token_id": 50256,
+ "eos_token_id": 0,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
- "max_length": 50,
  "model_type": "gpt2",
- "n_ctx": 1024,
+ "n_ctx": 512,
  "n_embd": 768,
  "n_head": 12,
  "n_inner": null,
@@ -37,5 +35,5 @@
  "torch_dtype": "float32",
  "transformers_version": "4.32.0.dev0",
  "use_cache": true,
- "vocab_size": 50257
+ "vocab_size": 50000
  }
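Taken together, these config changes read as a retrained tokenizer: bos_token_id/eos_token_id move from GPT-2's stock <|endoftext|> id (50256) to 0, the vocabulary shrinks to 50,000 entries, the context window halves to 512 tokens, and the sampling defaults (do_sample, max_length) drop out of the model config. A minimal sketch of loading the updated config and checking these values; the repo id Ahmed007/gpt2-arabic-poet_v2 and the resolvable short revision hash are assumptions inferred from this commit page:

```python
# A minimal sketch, assuming the repo id "Ahmed007/gpt2-arabic-poet_v2"
# and that the Hub resolves the short commit hash "905790f".
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "Ahmed007/gpt2-arabic-poet_v2",  # assumed repo id
    revision="905790f",              # this commit (assumed short-hash resolution)
)
assert config.bos_token_id == 0 and config.eos_token_id == 0
assert config.n_ctx == 512         # context window halved from 1024
assert config.vocab_size == 50000  # trimmed from GPT-2's stock 50257
```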
generation_config.json CHANGED
@@ -1,8 +1,6 @@
  {
  "_from_model_config": true,
- "bos_token_id": 50256,
- "do_sample": true,
- "eos_token_id": 50256,
- "max_length": 50,
+ "bos_token_id": 0,
+ "eos_token_id": 0,
  "transformers_version": "4.32.0.dev0"
  }
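With do_sample and max_length removed from generation_config.json, generation falls back to the transformers library defaults (greedy decoding, max_length=20), so callers who want the previous sampling behaviour must now pass it per call. A sketch, reusing the assumed repo id from above:

```python
# Sampling defaults no longer ship with the repo, so pass them explicitly.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "Ahmed007/gpt2-arabic-poet_v2"  # assumed repo id
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

inputs = tok("يا ليل الصب", return_tensors="pt")  # illustrative Arabic prompt
out = model.generate(
    **inputs,
    do_sample=True,  # was a shipped default before this commit
    max_length=50,   # likewise removed by this commit
    pad_token_id=tok.eos_token_id,
)
print(tok.decode(out[0], skip_special_tokens=True))
```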
merges.txt CHANGED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f6dd025f89bb3242df6e68725cc40e69700a35cb8162795dd14142a7c1aef0e3
- size 497807197
+ oid sha256:e672f70a410ede4e5430b9416d953479562bf1973d7e980a46a4d73ae18a9a63
+ size 497017693
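The new checkpoint is exactly 789,504 bytes smaller, which matches the 257 vocabulary rows removed in config.json: GPT-2 ties the input embedding to the LM head, so a single float32 matrix loses 257 × 768 entries. A quick arithmetic check:

```python
# Sanity check: the LFS size delta equals the trimmed embedding rows
# (GPT-2 ties wte and lm_head, so only one 768-wide float32 matrix is stored).
old_size, new_size = 497_807_197, 497_017_693
rows_removed, n_embd, f32_bytes = 50257 - 50000, 768, 4
assert old_size - new_size == rows_removed * n_embd * f32_bytes  # 789,504 bytes
```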
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,40 +1,9 @@
  {
- "add_bos_token": false,
  "add_prefix_space": false,
- "bos_token": {
-   "__type": "AddedToken",
-   "content": "<|endoftext|>",
-   "lstrip": false,
-   "normalized": true,
-   "rstrip": false,
-   "single_word": false
- },
+ "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": true,
- "eos_token": {
-   "__type": "AddedToken",
-   "content": "<|endoftext|>",
-   "lstrip": false,
-   "normalized": true,
-   "rstrip": false,
-   "single_word": false
- },
- "errors": "replace",
- "model_max_length": 1000000000000000019884624838656,
- "pad_token": {
-   "__type": "AddedToken",
-   "content": "<|endoftext|>",
-   "lstrip": false,
-   "normalized": true,
-   "rstrip": false,
-   "single_word": false
- },
+ "eos_token": "<|endoftext|>",
+ "model_max_length": 1024,
  "tokenizer_class": "GPT2Tokenizer",
- "unk_token": {
-   "__type": "AddedToken",
-   "content": "<|endoftext|>",
-   "lstrip": false,
-   "normalized": true,
-   "rstrip": false,
-   "single_word": false
- }
+ "unk_token": "<|endoftext|>"
  }
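This rewrite collapses the serialized AddedToken objects to plain strings, drops the explicit pad_token, and replaces the effectively unbounded model_max_length with 1024. Note that 1024 is still twice the new n_ctx of 512 in config.json, so tokenizer-level truncation will not by itself keep inputs inside the model's context window. A minimal sketch, again assuming the repo id:

```python
# A minimal sketch (assumed repo id) of the simplified special-token setup.
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("Ahmed007/gpt2-arabic-poet_v2")
assert tok.bos_token == tok.eos_token == tok.unk_token == "<|endoftext|>"
assert tok.model_max_length == 1024  # note: config.json's n_ctx is now 512
```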
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3d775d4854a407d5684db531f7f00a8bacc5b5fd268beb0827b7b91272d4a40b
+ oid sha256:d44834e0a5d6bf034437b7bca2c9ab4eb809a1ca266f0877d0a3e5c1e463b033
  size 3963
vocab.json CHANGED
The diff for this file is too large to render. See raw diff