jinymusim committed
Commit 2ce187c (1 parent: fab1244)

Upload 8 files

More training on separation

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "microsoft/DialoGPT-small",
+  "_name_or_path": "jinymusim/gpt-czech-poet",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
@@ -8,6 +8,7 @@
   "bos_token_id": 50256,
   "embd_pdrop": 0.1,
   "eos_token_id": 50256,
+  "eos_token_ids": 0,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
   "model_type": "gpt2",
@@ -17,7 +18,8 @@
   "n_inner": null,
   "n_layer": 12,
   "n_positions": 1024,
-  "pad_token_id": 50256,
+  "output_past": true,
+  "pad_token_id": 0,
   "reorder_and_upcast_attn": false,
   "resid_pdrop": 0.1,
   "scale_attn_by_inverse_layer_idx": false,
@@ -28,12 +30,13 @@
   "summary_type": "cls_index",
   "summary_use_proj": true,
   "task_specific_params": {
-    "conversational": {
-      "max_length": 1000
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
     }
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.28.1",
+  "transformers_version": "4.29.2",
   "use_cache": true,
-  "vocab_size": 50261
+  "vocab_size": 50257
 }
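The substantive changes in config.json are the new base reference ("jinymusim/gpt-czech-poet" instead of "microsoft/DialoGPT-small"), an explicit pad_token_id of 0, a text-generation task profile in place of the conversational one, and a vocab_size back at the stock GPT-2 value of 50257 now that the extra conversational tokens are gone. A minimal Python sketch for inspecting these fields, assuming the transformers library and assuming the repository is published under the id shown in "_name_or_path":

```python
from transformers import AutoConfig

# Assumption: the repo id matches the "_name_or_path" value in the diff above.
config = AutoConfig.from_pretrained("jinymusim/gpt-czech-poet")

# Values introduced by this commit (taken from the diff):
print(config.pad_token_id)          # expected: 0
print(config.vocab_size)            # expected: 50257 (was 50261)
print(config.task_specific_params)  # expected: {'text-generation': {'do_sample': True, 'max_length': 50}}
```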
generation_config.json CHANGED
@@ -2,5 +2,6 @@
   "_from_model_config": true,
   "bos_token_id": 50256,
   "eos_token_id": 50256,
-  "transformers_version": "4.28.1"
+  "pad_token_id": 0,
+  "transformers_version": "4.29.2"
 }
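generation_config.json picks up the same explicit pad token id, so generate() no longer has to fall back to the EOS token for padding. A short sketch, under the same repo-id assumption as above:

```python
from transformers import GenerationConfig

# Assumption: same repo id as in the config.json sketch above.
gen_config = GenerationConfig.from_pretrained("jinymusim/gpt-czech-poet")

print(gen_config.pad_token_id)  # expected: 0 (newly added)
print(gen_config.eos_token_id)  # unchanged: 50256
```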
merges.txt CHANGED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:813dc820124ee5bbf050b6d54ef5ee549cc2f54d69138ab1ad740c352b5e0c7d
-size 510410301
+oid sha256:fd9305b647f3049770f9661cb3acb2aec7b0a2f0d6eda193e90a8e28379f42dc
+size 510398013
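Only the Git LFS pointer for the weights changes: a new sha256 and a byte size smaller by exactly 12,288 bytes, which matches dropping 4 vocabulary rows of a 768-dimensional float32 embedding (4 × 768 × 4 bytes). A small sketch, assuming pytorch_model.bin has already been downloaded locally, for checking a copy against the new pointer:

```python
import hashlib
import os

# Assumption: the checkpoint was downloaded next to this script.
path = "pytorch_model.bin"

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# Values from the new LFS pointer above.
print(sha256.hexdigest() == "fd9305b647f3049770f9661cb3acb2aec7b0a2f0d6eda193e90a8e28379f42dc")
print(os.path.getsize(path) == 510398013)
```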
special_tokens_map.json CHANGED
@@ -1,30 +1,6 @@
 {
-  "additional_special_tokens": [
-    "<|system|>",
-    "<|user|>",
-    "<|endoftext|>",
-    "<|belive|>"
-  ],
-  "bos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
   "pad_token": "<|endoftext|>",
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "unk_token": "<|endoftext|>"
 }
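special_tokens_map.json drops the DialoGPT-style conversational tokens (<|system|>, <|user|>, <|belive|>) and collapses the remaining entries to plain strings, leaving only the standard GPT-2 <|endoftext|> token for bos/eos/unk/pad. A sketch for confirming what the tokenizer now reports, under the same repo-id assumption:

```python
from transformers import AutoTokenizer

# Assumption: same repo id as in the config.json sketch above.
tokenizer = AutoTokenizer.from_pretrained("jinymusim/gpt-czech-poet")

print(tokenizer.special_tokens_map)
# expected: {'bos_token': '<|endoftext|>', 'eos_token': '<|endoftext|>',
#            'unk_token': '<|endoftext|>', 'pad_token': '<|endoftext|>'}
print(tokenizer.additional_special_tokens)  # expected: [] (the extra tokens were removed)
```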
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -20,7 +20,14 @@
   },
   "errors": "replace",
   "model_max_length": 1024,
-  "pad_token": null,
+  "pad_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": {
     "__type": "AddedToken",
vocab.json CHANGED
The diff for this file is too large to render. See raw diff