ritwikm committed
Commit
906e2e2
1 Parent(s): 150742a

all configs and tokenizer files from expts

Files changed (5)
  1. README.md +1 -1
  2. added_tokens.json +4 -0
  3. config.json +7 -1
  4. special_tokens_map.json +24 -0
  5. vocab.json +0 -0
README.md CHANGED
@@ -14,6 +14,6 @@ We first fine-tuned gpt-2 for 1 epoch on the English corpus (after cleaning*) of
 
 Since the above dataset contains news regarding Indian subcontinent. We thought that with this fine-tuning, the model will get familiary with India specific terms.
 
-Then we further fine-tuned this model on sentences written by Gandhi.
+Then we further fine-tuned this model on sentences written by Gandhi (for 3 epochs).
 
 *Before cleaning #sents = 54M, after cleaning 42M. We simply took those English sentences which ends with a full-stop.
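The cleaning rule described in the README (keep only those English sentences that end with a full stop) is simple enough to sketch. The commit does not include the actual cleaning script, so the snippet below is an illustration only; the file names and the exact filtering rule are assumptions.

```python
# Minimal sketch of the cleaning step mentioned in the README: keep only
# sentences that end with a full stop. File names are placeholders.

def keep_sentence(line: str) -> bool:
    """Return True for sentences that end with a full stop."""
    return line.strip().endswith(".")

def clean_corpus(in_path: str, out_path: str) -> None:
    with open(in_path, encoding="utf-8") as src, open(out_path, "w", encoding="utf-8") as dst:
        for line in src:
            if keep_sentence(line):
                dst.write(line)

# clean_corpus("news_sentences.txt", "news_sentences_clean.txt")  # ~54M -> ~42M sentences
```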
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+{
+  "<|pad|>": 50258,
+  "<|startoftext|>": 50257
+}
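added_tokens.json extends the stock GPT-2 vocabulary (50257 tokens, ids 0-50256) with two extra tokens: <|startoftext|> at id 50257 and <|pad|> at id 50258. A minimal sketch of how those ids surface once the tokenizer files from this commit are loaded; the local path is a placeholder, and it assumes the remaining tokenizer files (e.g. merges.txt) are present alongside the ones added here.

```python
from transformers import GPT2Tokenizer

# Placeholder path to a directory containing the tokenizer files from this commit.
tokenizer = GPT2Tokenizer.from_pretrained("./gandhi-gpt2")

print(tokenizer.convert_tokens_to_ids("<|startoftext|>"))  # 50257
print(tokenizer.convert_tokens_to_ids("<|pad|>"))          # 50258
print(len(tokenizer))                                      # 50259 = 50257 base + 2 added
```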
config.json CHANGED
@@ -1,4 +1,5 @@
 {
+  "_name_or_path": "gpt2",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
@@ -7,15 +8,18 @@
   "bos_token_id": 50256,
   "embd_pdrop": 0.1,
   "eos_token_id": 50256,
+  "gradient_checkpointing": false,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
   "model_type": "gpt2",
   "n_ctx": 1024,
   "n_embd": 768,
   "n_head": 12,
+  "n_inner": null,
   "n_layer": 12,
   "n_positions": 1024,
   "resid_pdrop": 0.1,
+  "scale_attn_weights": true,
   "summary_activation": null,
   "summary_first_dropout": 0.1,
   "summary_proj_to_labels": true,
@@ -27,5 +31,7 @@
       "max_length": 50
     }
   },
-  "vocab_size": 50257
+  "transformers_version": "4.8.2",
+  "use_cache": true,
+  "vocab_size": 50259
 }
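The vocab_size bump from 50257 to 50259 matches the two entries in added_tokens.json, so the checkpoint's embedding matrix must have 50259 rows. A hedged sketch of that consistency check follows; the path is a placeholder, and the resize_token_embeddings call (commented out) is only relevant when reproducing the fine-tuning from the stock gpt2 checkpoint.

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Placeholder path to a directory containing this commit's config and tokenizer files.
tokenizer = GPT2Tokenizer.from_pretrained("./gandhi-gpt2")
model = GPT2LMHeadModel.from_pretrained("./gandhi-gpt2")

# The config's vocab_size and the tokenizer length should agree.
assert model.config.vocab_size == len(tokenizer) == 50259

# If starting from the stock "gpt2" checkpoint instead, the embedding matrix
# would need to grow after the two tokens are added:
# model.resize_token_embeddings(len(tokenizer))
```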
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|pad|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
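special_tokens_map.json wires <|startoftext|> in as the BOS token, <|endoftext|> as EOS and UNK, and <|pad|> as the padding token. A rough sketch of how these are typically used at generation time; the checkpoint path and the prompt are placeholders, not part of this commit.

```python
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Placeholder path to a directory containing the files from this commit.
tokenizer = GPT2Tokenizer.from_pretrained("./gandhi-gpt2")
model = GPT2LMHeadModel.from_pretrained("./gandhi-gpt2")

prompt = tokenizer.bos_token + "Non-violence is"   # bos_token is <|startoftext|>
inputs = tokenizer(prompt, return_tensors="pt")

with torch.no_grad():
    out = model.generate(
        **inputs,
        max_length=50,                         # matches "max_length": 50 in config.json
        pad_token_id=tokenizer.pad_token_id,   # <|pad|> (id 50258)
        eos_token_id=tokenizer.eos_token_id,   # <|endoftext|> (id 50256)
    )

print(tokenizer.decode(out[0], skip_special_tokens=True))
```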
vocab.json CHANGED
The diff for this file is too large to render. See raw diff