ali2066 committed on
Commit c9c1111
1 Parent(s): c052204

End of training

config.json CHANGED
@@ -1,35 +1,29 @@
 {
-  "_name_or_path": "distilbert-base-uncased-finetuned-sst-2-english",
-  "activation": "gelu",
+  "_name_or_path": "cardiffnlp/twitter-roberta-base",
   "architectures": [
-    "DistilBertForSequenceClassification"
+    "RobertaForSequenceClassification"
   ],
-  "attention_dropout": 0.1,
-  "dim": 768,
-  "dropout": 0.1,
-  "finetuning_task": "sst-2",
-  "hidden_dim": 3072,
-  "id2label": {
-    "0": "NEGATIVE",
-    "1": "POSITIVE"
-  },
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
   "initializer_range": 0.02,
-  "label2id": {
-    "NEGATIVE": 0,
-    "POSITIVE": 1
-  },
-  "max_position_embeddings": 512,
-  "model_type": "distilbert",
-  "n_heads": 12,
-  "n_layers": 6,
-  "output_past": true,
-  "pad_token_id": 0,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
-  "qa_dropout": 0.1,
-  "seq_classif_dropout": 0.2,
-  "sinusoidal_pos_embds": false,
-  "tie_weights_": true,
   "torch_dtype": "float32",
   "transformers_version": "4.15.0",
-  "vocab_size": 30522
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
 }
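
With this commit the checkpoint is no longer a DistilBERT SST-2 fine-tune but a sequence classifier initialized from cardiffnlp/twitter-roberta-base. A minimal loading sketch; the repository id below is a placeholder, since the actual id is not shown in this diff:

    from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

    repo_id = "ali2066/<model-name>"  # placeholder; substitute the real repository id

    config = AutoConfig.from_pretrained(repo_id)
    print(config.model_type)         # "roberta" after this commit (was "distilbert")
    print(config.num_hidden_layers)  # 12 (the DistilBERT config had 6 layers)
    # The old id2label ({0: NEGATIVE, 1: POSITIVE}) was dropped, so labels
    # fall back to the generic defaults LABEL_0 / LABEL_1.
    print(config.id2label)

    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    model = AutoModelForSequenceClassification.from_pretrained(repo_id)
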
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d5b321a2cd09972c7099721365c3f4e0caa2af551ade7419b9177229d36a3002
-size 267860081
+oid sha256:b6fa7749601c6926ee24a27446c3b06c3237d90dd086c53c7a9dd9a7007456ca
+size 498674093
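
Both versions of pytorch_model.bin are Git LFS pointer files (version, oid sha256, size); the ~268 MB DistilBERT weights are replaced by ~499 MB RoBERTa-base weights. A generic integrity check of a downloaded copy against the pointer's digest, using only the Python standard library (the local path is an assumption):

    import hashlib

    path = "pytorch_model.bin"  # assumed local copy of the downloaded weights
    expected = "b6fa7749601c6926ee24a27446c3b06c3237d90dd086c53c7a9dd9a7007456ca"

    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash 1 MiB at a time
            h.update(chunk)

    assert h.hexdigest() == expected, "file does not match the LFS pointer oid"
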
runs/May02_15-12-38_bb8-lix.polytechnique.fr/events.out.tfevents.1651497263.bb8-lix.polytechnique.fr CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cebfff8e5096a817ab39118cdc854db2cf3dbb1116ad4f1afa56a53e80e6e792
-size 984
+oid sha256:d8a26e6c7423d9b5f959bf926c99f3ba2c8255c9824874728beb572902a843f9
+size 1456
runs/May02_15-33-38_bb8-lix.polytechnique.fr/1651498423.2946208/events.out.tfevents.1651498423.bb8-lix.polytechnique.fr ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0730ebb86d7e2c7783f98527f71ce081272d42789a7ef1106ddb70a69bff4308
+size 4746
runs/May02_15-33-38_bb8-lix.polytechnique.fr/events.out.tfevents.1651498423.bb8-lix.polytechnique.fr ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:465fb7d16475870b1541cdef0f9b75178e0e9b76105c1c6c566b300fbc2cbe12
+size 6248
runs/May02_15-33-38_bb8-lix.polytechnique.fr/events.out.tfevents.1651498608.bb8-lix.polytechnique.fr ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d55622431a22f709dea2cf93b33aa9bd34b5040cdd833bc496c954b2ffa1b01d
+size 984
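
The runs/ entries are TensorBoard event logs written during training, also stored as LFS pointers. A sketch of inspecting one locally with TensorBoard's event reader; the directory path comes from this diff, while the scalar tag names depend on the Trainer setup and are only illustrative:

    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

    ea = EventAccumulator("runs/May02_15-33-38_bb8-lix.polytechnique.fr")
    ea.Reload()
    print(ea.Tags()["scalars"])  # e.g. loss/learning-rate tags, depending on the run
    for tag in ea.Tags()["scalars"]:
        for event in ea.Scalars(tag):
            print(tag, event.step, event.value)
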
special_tokens_map.json CHANGED
@@ -1 +1 @@
-{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
+{"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-uncased-finetuned-sst-2-english", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "DistilBertTokenizer"}
+{"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "trim_offsets": true, "special_tokens_map_file": null, "name_or_path": "cardiffnlp/twitter-roberta-base", "tokenizer_class": "RobertaTokenizer"}
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:511072de6ba2f5980a50daea03bf0ff6803b11adfa8e7b4fb85dc922efb8b00c
+oid sha256:a729db31536bd2a00a6b7d6e7b8cc0bd68fe306bdf0b77fd40d02dea6346fd4c
 size 3119
vocab.json ADDED
The diff for this file is too large to render. See raw diff