koziev ilya committed on
Commit
24a54b3
1 Parent(s): b04856c

smaller, updated version of the model: 125M parameters, extended dataset

README.md CHANGED
@@ -16,7 +16,7 @@ widget:
 
 ## Russian Chit-chat with common sense reasoning
 
-The model is the core of a prototype [dialogue system](https://github.com/Koziev/chatbot). It has two main functions.
+The model is the core of a prototype [dialogue system](https://github.com/Koziev/chatbot) with two main functions.
 
 The first function is chit-chat reply generation. The dialogue history (the preceding few replies, up to 10) is fed in as the prompt.
 
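The first function described in the README maps onto an ordinary causal-LM `generate()` call. Below is a minimal, hypothetical sketch with `transformers`; `MODEL_DIR` and the dash-prefixed reply layout are illustrative assumptions, not the official prompt format defined by the model card.

```python
# Minimal sketch (not official usage): generate a chit-chat reply from a short
# dialogue history. MODEL_DIR and the "-" reply format are assumptions.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_DIR = "path/to/this/checkpoint"  # hypothetical: local clone of this repo

tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoModelForCausalLM.from_pretrained(MODEL_DIR)
model.eval()

# Per the README, up to ~10 preceding replies are used as the prompt.
history = [
    "- Привет, как дела?",
    "- Нормально, а у тебя?",
]
prompt = "\n".join(history) + "\n-"

input_ids = tokenizer.encode(prompt, return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_new_tokens=60,
        do_sample=True,
        top_p=0.9,
        temperature=0.8,
        pad_token_id=tokenizer.eos_token_id,
    )

# Decode only the newly generated continuation, not the prompt.
reply = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
print(reply)
```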
added_tokens.json CHANGED
@@ -1 +1,3 @@
-{"<|endoftext|>": 50257}
+{
+  "<|endoftext|>": 50257
+}
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "sberbank-ai/rugpt3medium_based_on_gpt2",
+  "_name_or_path": "sberbank-ai/rugpt3small_based_on_gpt2",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
@@ -8,24 +8,16 @@
   "bos_token_id": 50256,
   "embd_pdrop": 0.1,
   "eos_token_id": 50256,
-  "id2label": {
-    "0": "LABEL_0"
-  },
+  "gradient_checkpointing": false,
   "initializer_range": 0.02,
-  "label2id": {
-    "LABEL_0": 0
-  },
   "layer_norm_epsilon": 1e-05,
   "model_type": "gpt2",
   "n_ctx": 2048,
-  "n_embd": 1024,
-  "n_head": 16,
+  "n_embd": 768,
+  "n_head": 12,
   "n_inner": null,
-  "n_layer": 24,
+  "n_layer": 12,
   "n_positions": 2048,
-  "n_special": 0,
-  "output_past": true,
-  "predict_special_tokens": true,
   "reorder_and_upcast_attn": false,
   "resid_pdrop": 0.1,
   "scale_attn_by_inverse_layer_idx": false,
@@ -36,7 +28,7 @@
   "summary_type": "cls_index",
   "summary_use_proj": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.19.4",
+  "transformers_version": "4.21.3",
   "use_cache": true,
   "vocab_size": 50258
 }
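The commit message's 125M figure can be sanity-checked against the new hyperparameters (n_layer=12, n_embd=768, n_head=12, vocab_size=50258). A quick sketch that builds a randomly initialized GPT-2 of the same shape, without downloading the checkpoint:

```python
# Sketch: estimate the parameter count implied by the new config.json values.
# This instantiates a random model of the same shape; it does not load weights.
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(
    vocab_size=50258,   # includes the added <|endoftext|> token (id 50257)
    n_positions=2048,
    n_embd=768,
    n_layer=12,
    n_head=12,
)
model = GPT2LMHeadModel(config)
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")  # roughly 125M, matching the commit message
```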
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eafb5c210ea5fef7c9ffebd0c98d6053e1d8795ebae117d7ba41efd98bbedb44
-size 1524262745
+oid sha256:98ae88bfbed22ba3fb913bec46ae4a206825bf67b240af7057f88315a3090b90
+size 551296803
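The new LFS pointer size is consistent with the smaller architecture. A back-of-the-envelope check, assuming float32 weights and the parameter estimate above:

```python
# Rough size check (assumptions: ~125M parameters, float32 storage).
n_params = 125.2e6          # estimated from the new config.json
bytes_per_param = 4         # torch_dtype is float32
print(f"weights alone: ~{n_params * bytes_per_param / 1e6:.0f} MB")  # ~501 MB
print("LFS pointer size: 551,296,803 bytes (~551 MB)")
# The gap is plausibly non-parameter buffers (e.g. per-layer attention masks)
# plus serialization overhead.
```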
special_tokens_map.json CHANGED
@@ -1 +1,12 @@
-{"bos_token": "<s>", "eos_token": "</s>", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<pad>"}
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<pad>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer_config.json CHANGED
@@ -1 +1,33 @@
-{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": null, "name_or_path": "sberbank-ai/rugpt3medium_based_on_gpt2", "errors": "replace", "pad_token": null, "add_bos_token": false, "tokenizer_class": "GPT2Tokenizer"}
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "name_or_path": "sberbank-ai/rugpt3small_based_on_gpt2",
+  "pad_token": null,
+  "special_tokens_map_file": null,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}