cardiffnlp committed
Commit
dc0509f
1 Parent(s): 0dfb302

Adding bertweet classifier

Files changed (8)
  1. README.md +0 -0
  2. added_tokens.json +1 -0
  3. bpe.codes +0 -0
  4. config.json +36 -0
  5. pytorch_model.bin +3 -0
  6. tf_model.h5 +3 -0
  7. tokenizer_config.json +1 -0
  8. vocab.txt +0 -0
README.md ADDED
File without changes
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<mask>": 64000}
bpe.codes ADDED
The diff for this file is too large to render.
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "/home/jupyter/misc/tweeteval/TweetEval_models_bertweet/original/bertweet-base-emotion/",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 130,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "tokenizer_class": "BertweetTokenizer",
+   "type_vocab_size": 1,
+   "vocab_size": 64001
+ }
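The config describes a 12-layer RoBERTa-style sequence classifier with four output labels (the TweetEval emotion task, per `_name_or_path`) and a BERTweet tokenizer. A minimal sketch of loading it with `transformers`, assuming the repo id `cardiffnlp/bertweet-base-emotion`; the predicted label name is the generic `LABEL_0`–`LABEL_3` id, since the config ships no task-specific names:

```python
# Sketch: load and run the classifier added in this commit.
# The repo id is an assumption inferred from the commit; adjust if published elsewhere.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo_id = "cardiffnlp/bertweet-base-emotion"  # assumption
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

# max_position_embeddings is 130, so keep inputs within BERTweet's 128-token limit.
inputs = tokenizer("I love this!", return_tensors="pt", truncation=True, max_length=128)
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])  # prints a generic id, e.g. "LABEL_1"
```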
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2245899d0bf8b89e8a326138c1f9bad8db60d9c079ad0a6476cfeee2a2e8ef28
+ size 539696841
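Both weight files are stored via Git LFS, so the diff shows only a three-line pointer (spec version, SHA-256 of the blob, and size in bytes, here ~540 MB) rather than the binary itself. A sketch of resolving the pointer to the actual file with `huggingface_hub`, again assuming the `cardiffnlp/bertweet-base-emotion` repo id:

```python
# Sketch: download the real weights behind the LFS pointer via the Hub.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="cardiffnlp/bertweet-base-emotion",  # assumption, see above
    filename="pytorch_model.bin",
)
print(path)  # local cache path to the ~540 MB weights file
```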
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5a62576da34104cdca1ecf258f32db296acdf47abadeb7a366dde8777532c53
+ size 542250720
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"normalization": false, "bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": "<mask>", "do_lower_case": false, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "/home/jupyter/TMP_finetune_tweeteval/model/"}
vocab.txt ADDED
The diff for this file is too large to render.