konverner committed on
Commit bbb7bc1
1 Parent(s): e280e93

Push model using huggingface_hub.

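A commit like this is typically produced with the huggingface_hub client. Below is a minimal sketch of a call that could generate it; the local folder path and repo id are hypothetical, inferred from the paths that appear in the diff, not taken from the commit itself.

# Minimal sketch: pushing a local model folder with huggingface_hub.
# folder_path and repo_id are assumptions for illustration.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="models/due_retail_25",   # local model directory (assumed)
    repo_id="konverner/due_retail_25",    # target Hub repo (assumed)
    commit_message="Push model using huggingface_hub.",
)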
config.json CHANGED
@@ -1,34 +1,24 @@
 {
- "_name_or_path": "/root/.cache/torch/sentence_transformers/konverner_due_sentiment/",
- "activation": "gelu",
+ "_name_or_path": "C:\\Users\\konst\\PycharmProjects\\due_pipeline\\models\\due_retail_25\\",
  "architectures": [
- "DistilBertModel"
+ "MPNetModel"
  ],
- "attention_dropout": 0.1,
- "dim": 768,
- "dropout": 0.1,
- "finetuning_task": "sst-2",
- "hidden_dim": 3072,
- "id2label": {
- "0": "NEGATIVE",
- "1": "POSITIVE"
- },
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
  "initializer_range": 0.02,
- "label2id": {
- "NEGATIVE": 0,
- "POSITIVE": 1
- },
- "max_position_embeddings": 512,
- "model_type": "distilbert",
- "n_heads": 12,
- "n_layers": 6,
- "output_past": true,
- "pad_token_id": 0,
- "qa_dropout": 0.1,
- "seq_classif_dropout": 0.2,
- "sinusoidal_pos_embds": false,
- "tie_weights_": true,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "mpnet",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "relative_attention_num_buckets": 32,
  "torch_dtype": "float32",
- "transformers_version": "4.30.2",
- "vocab_size": 30522
+ "transformers_version": "4.27.3",
+ "vocab_size": 30527
 }
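This config change swaps the backbone from a 6-layer DistilBERT sentiment classifier to a 12-layer MPNet encoder (note the new relative-attention field and the larger 30527-token vocabulary). A quick sketch for verifying the pushed config, assuming the same hypothetical repo id as above:

# Sketch: confirm the committed config describes an MPNet encoder.
# "konverner/due_retail_25" is a hypothetical repo id for illustration.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("konverner/due_retail_25")
assert config.model_type == "mpnet"
assert config.num_hidden_layers == 12 and config.hidden_size == 768
print(config.architectures)  # ['MPNetModel']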
config_sentence_transformers.json CHANGED
@@ -1,7 +1,7 @@
 {
  "__version__": {
- "sentence_transformers": "2.2.2",
- "transformers": "4.30.2",
- "pytorch": "2.0.1+cu118"
+ "sentence_transformers": "2.0.0",
+ "transformers": "4.7.0",
+ "pytorch": "1.9.0+cu102"
  }
 }
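The __version__ block records the library stack that produced the model; the new file was written by an older sentence-transformers 2.0.0 / transformers 4.7.0 environment. Loading the repo as a sentence encoder would look roughly like this (repo id again hypothetical):

# Sketch: loading the pushed model as a sentence-transformers encoder.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("konverner/due_retail_25")  # hypothetical repo id
embeddings = model.encode(["a sample retail review"])
print(embeddings.shape)  # (1, 768) for this MPNet backbone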
model_head.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:aa5d67404ee478ae72e7836b35add6580a39847741aba1646ce1a7d164d5d17b
- size 6991
+ oid sha256:3bf870f4bc6282a0a007d9ed008cfd996f4219ac182ec7e032532881cb9d3488
+ size 154831
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:c5c04e4bb8b2aacc9a97b71d58f85e9ec69089f500643998ef239764dddca2cc
- size 265484701
+ oid sha256:ca441fa0a42f942dda9486663a7d8db424521ad476a3d873030f47e96841548e
+ size 438013677
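Both model_head.pkl (likely a pickled classification head, as used by SetFit-style pipelines) and pytorch_model.bin are stored through Git LFS, so the repo tracks only pointer files: a spec line, the SHA-256 of the blob (oid), and its byte size. A sketch for checking a downloaded file against its pointer:

# Sketch: verify a downloaded file against its Git LFS pointer.
import hashlib

def lfs_digest(path, chunk_size=1 << 20):
    """Return (sha256 hex digest, byte size) of a file, read in chunks."""
    h, size = hashlib.sha256(), 0
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            h.update(block)
            size += len(block)
    return h.hexdigest(), size

oid, size = lfs_digest("pytorch_model.bin")
assert oid == "ca441fa0a42f942dda9486663a7d8db424521ad476a3d873030f47e96841548e"
assert size == 438013677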
special_tokens_map.json CHANGED
@@ -1,7 +1,15 @@
 {
- "cls_token": "[CLS]",
- "mask_token": "[MASK]",
- "pad_token": "[PAD]",
- "sep_token": "[SEP]",
+ "bos_token": "<s>",
+ "cls_token": "<s>",
+ "eos_token": "</s>",
+ "mask_token": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
  "unk_token": "[UNK]"
 }
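The special-token map moves from BERT-style tokens ([CLS], [SEP], [PAD], [MASK]) to RoBERTa-style ones (<s>, </s>, <pad>, <mask>), and mask_token becomes a dict of per-token flags; "lstrip": true lets <mask> absorb a preceding space. Reading the committed map directly:

# Sketch: inspect the new special-token map as plain JSON.
import json

with open("special_tokens_map.json", encoding="utf-8") as f:
    specials = json.load(f)
print(specials["cls_token"], specials["sep_token"])  # <s> </s>
print(specials["mask_token"]["lstrip"])              # True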
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,15 +1,67 @@
 {
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
  "clean_up_tokenization_spaces": true,
- "cls_token": "[CLS]",
+ "cls_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
  "do_basic_tokenize": true,
  "do_lower_case": true,
- "mask_token": "[MASK]",
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "__type": "AddedToken",
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
  "model_max_length": 512,
  "never_split": null,
- "pad_token": "[PAD]",
- "sep_token": "[SEP]",
+ "pad_token": {
+ "__type": "AddedToken",
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "special_tokens_map_file": "C:\\Users\\konst\\PycharmProjects\\due_pipeline\\models\\due_retail_25\\special_tokens_map.json",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
- "tokenizer_class": "DistilBertTokenizer",
- "unk_token": "[UNK]"
+ "tokenizer_class": "MPNetTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
 }
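tokenizer_config.json now declares MPNetTokenizer and spells out each special token as an AddedToken with explicit lstrip/rstrip/normalized flags. A sketch of loading the tokenizer and checking what it resolves to (hypothetical repo id as before):

# Sketch: the retargeted tokenizer should resolve to an MPNet class
# with RoBERTa-style special tokens.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("konverner/due_retail_25")  # assumed id
print(type(tok).__name__)             # MPNetTokenizerFast or MPNetTokenizer
print(tok.cls_token, tok.sep_token)   # <s> </s>
print(tok.mask_token, tok.pad_token)  # <mask> <pad>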
vocab.txt CHANGED
@@ -1,3 +1,7 @@
+ <s>
+ <pad>
+ </s>
+ <unk>
  [PAD]
  [unused0]
  [unused1]
@@ -30520,3 +30524,4 @@ necessitated
  ##:
  ##?
  ##~
+ <mask>
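The vocabulary edit prepends <s>, <pad>, </s>, <unk> and appends <mask>, growing the file from 30522 to 30527 lines, which matches the new "vocab_size" in config.json. A quick consistency check:

# Sketch: vocab.txt line count should match config.json's "vocab_size".
with open("vocab.txt", encoding="utf-8") as f:
    vocab = [line.rstrip("\n") for line in f]
assert len(vocab) == 30527
assert vocab[:4] == ["<s>", "<pad>", "</s>", "<unk>"]
assert vocab[-1] == "<mask>"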