dima806 committed on
Commit
8f67339
1 Parent(s): c6dbc80

Upload . with huggingface_hub

bpe.codes ADDED
The diff for this file is too large to render.
 
checkpoint-10610/config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "vinai/bertweet-base",
+  "architectures": [
+    "RobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "NOT ENGAGED",
+    "1": "ENGAGED"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 130,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "tokenizer_class": "BertweetTokenizer",
+  "torch_dtype": "float32",
+  "transformers_version": "4.28.1",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 64001
+}
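
The config above fully specifies the fine-tuned model: a `vinai/bertweet-base` (RoBERTa) encoder with a binary sequence-classification head mapping class 0 to "NOT ENGAGED" and class 1 to "ENGAGED". A minimal inference sketch (the local path and example tweet are illustrative; point `model_dir` at a clone of this repo with the LFS weights pulled):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_dir = "."  # hypothetical: a local clone of this repo

# AutoTokenizer resolves to BertweetTokenizer via "tokenizer_class" in
# config.json, using the repo's vocab.txt and bpe.codes files.
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
model.eval()

text = "can't wait for the next episode!!"  # made-up example input
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128)
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])  # "ENGAGED" or "NOT ENGAGED"
```

Note that `max_position_embeddings` is 130, so inputs are truncated to BERTweet's usual 128-token budget.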
checkpoint-10610/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2cef42ea36209d7fd619d4205f367e13896c8876d6b67c4c66f554fac251644
+size 1079329925
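
The binary artifacts in this commit are tracked with Git LFS, so the repository stores only a three-line pointer file (spec version, SHA-256 object ID, byte size) rather than the blob itself. A small sketch of parsing one of these pointers:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:b2cef42ea36209d7fd619d4205f367e13896c8876d6b67c4c66f554fac251644
size 1079329925"""

info = parse_lfs_pointer(pointer)
print(info["oid"])        # sha256:b2cef42e...
print(int(info["size"]))  # 1079329925 bytes, ~1.0 GiB
```

The optimizer state is roughly twice the size of the ~540 MB model file below, consistent with an Adam-style optimizer keeping two moment buffers per parameter.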
checkpoint-10610/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d042b48a7eb187fff472b27f73a602c2b2fc5a1da20728c06a695e57abd6b524
+size 539676341
checkpoint-10610/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aaf7ec3d584e53885d677f6188b01dbad1a9c0983c6c5aa9f6a645de58afdce6
+size 14575
checkpoint-10610/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:609915a9b9764097c1a3c622919f28b815079a296a6cc3b6ab969e496f8ebf94
+size 627
checkpoint-10610/trainer_state.json ADDED
The diff for this file is too large to render.
 
checkpoint-10610/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d381448784682693057f6ae24ee4009087c7dc083eac1d812a1173f2df061868
+size 3515
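
Together these files make checkpoint-10610 a complete transformers Trainer checkpoint: model weights, optimizer and scheduler state, RNG state, the trainer's step/metric history (trainer_state.json), and the serialized TrainingArguments. A sketch of inspecting the pieces, assuming a local clone with LFS files pulled:

```python
import torch
from transformers import TrainingArguments  # needed so the pickle in training_args.bin resolves

ckpt = "checkpoint-10610"  # hypothetical local path

# Optimizer state: param groups plus per-parameter moment buffers.
# weights_only=False because these are full pickles, not bare tensors.
opt = torch.load(f"{ckpt}/optimizer.pt", map_location="cpu", weights_only=False)
print(opt.keys())  # dict_keys(['state', 'param_groups'])

# LR scheduler state (last step, base learning rates, ...).
sched = torch.load(f"{ckpt}/scheduler.pt", map_location="cpu", weights_only=False)
print(sched)

# The TrainingArguments the run was launched with.
args = torch.load(f"{ckpt}/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)
```

Because every piece is present, `trainer.train(resume_from_checkpoint="checkpoint-10610")` could resume an interrupted run from step 10610 exactly, RNG state included.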
config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "vinai/bertweet-base",
+  "architectures": [
+    "RobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "NOT ENGAGED",
+    "1": "ENGAGED"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 130,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "tokenizer_class": "BertweetTokenizer",
+  "torch_dtype": "float32",
+  "transformers_version": "4.28.1",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 64001
+}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d042b48a7eb187fff472b27f73a602c2b2fc5a1da20728c06a695e57abd6b524
+size 539676341
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d381448784682693057f6ae24ee4009087c7dc083eac1d812a1173f2df061868
+size 3515
vocab.txt ADDED
The diff for this file is too large to render.
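
The repo-level vocab.txt and bpe.codes (the first and last files in this diff) are the two files BertweetTokenizer expects, since BERTweet uses a fastBPE-style vocabulary rather than RoBERTa's usual vocab.json/merges.txt pair. A minimal sketch constructing the tokenizer directly from them instead of via AutoTokenizer (relative paths assume the repo root as working directory):

```python
from transformers import BertweetTokenizer

# bpe.codes plays the role of the BPE merges file here.
tokenizer = BertweetTokenizer(vocab_file="vocab.txt", merges_file="bpe.codes")
print(tokenizer.tokenize("this show is so good :)"))  # made-up example input
```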