Upload . with huggingface_hub
- checkpoint-26010/config.json +30 -0
- checkpoint-26010/optimizer.pt +3 -0
- checkpoint-26010/pytorch_model.bin +3 -0
- checkpoint-26010/rng_state.pth +3 -0
- checkpoint-26010/scheduler.pt +3 -0
- checkpoint-26010/trainer_state.json +0 -0
- checkpoint-26010/training_args.bin +3 -0
- checkpoint-52020/config.json +30 -0
- checkpoint-52020/optimizer.pt +3 -0
- checkpoint-52020/pytorch_model.bin +3 -0
- checkpoint-52020/rng_state.pth +3 -0
- checkpoint-52020/scheduler.pt +3 -0
- checkpoint-52020/trainer_state.json +0 -0
- checkpoint-52020/training_args.bin +3 -0
- checkpoint-78030/config.json +30 -0
- checkpoint-78030/optimizer.pt +3 -0
- checkpoint-78030/pytorch_model.bin +3 -0
- checkpoint-78030/rng_state.pth +3 -0
- checkpoint-78030/scheduler.pt +3 -0
- checkpoint-78030/trainer_state.json +0 -0
- checkpoint-78030/training_args.bin +3 -0
- config.json +30 -0
- pytorch_model.bin +3 -0
- training_args.bin +3 -0
- vocab.txt +0 -0
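
This commit adds three intermediate Trainer checkpoints plus the final model files at the repo root. Per the commit title, the push was done with the huggingface_hub client; a minimal sketch of such an upload, assuming a placeholder repo_id (the target repository is not named in this diff):

from huggingface_hub import HfApi

api = HfApi()

# Push the current working directory ("."), including the three
# Trainer checkpoint folders, to a model repo on the Hub.
# The repo_id is a placeholder; the real repository is not shown here.
api.upload_folder(
    folder_path=".",
    repo_id="your-username/engaging-title-classifier",
    repo_type="model",
    commit_message="Upload . with huggingface_hub",
)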
checkpoint-26010/config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "_name_or_path": "distilbert-base-cased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "id2label": {
+    "0": "NOT ENGAGING TITLE",
+    "1": "ENGAGING TITLE"
+  },
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "output_past": true,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.28.1",
+  "vocab_size": 28996
+}
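
This config describes a DistilBERT binary sequence classifier whose id2label maps 0 to "NOT ENGAGING TITLE" and 1 to "ENGAGING TITLE". A minimal inference sketch, assuming the repo files (config.json, pytorch_model.bin, and the LFS weights pulled) sit in the current directory; the example input is illustrative:

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# "." assumes a local clone of the repo root with the LFS weights fetched.
# The repo ships vocab.txt but no tokenizer_config.json, so we load the
# tokenizer from the base model named in "_name_or_path".
model = AutoModelForSequenceClassification.from_pretrained(".")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")

inputs = tokenizer("Ten tricks to write titles people click",
                   return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# id2label from config.json turns the argmax index into a readable label.
print(model.config.id2label[logits.argmax(-1).item()])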
checkpoint-26010/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b400a5e8265e0425b342ce7da83308e315a5b42c952df7a86d1debaf99819ed
+size 526325317
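
Note that the .pt and .bin entries in this diff are Git LFS pointer files (version, oid, size), not the binaries themselves; the roughly 526 MB optimizer state lives in LFS storage. A sketch of fetching one such file directly, again with a placeholder repo_id:

from huggingface_hub import hf_hub_download

# Downloads the actual optimizer state that the LFS pointer above
# refers to. repo_id is a placeholder; the real repo is not named here.
path = hf_hub_download(
    repo_id="your-username/engaging-title-classifier",
    filename="checkpoint-26010/optimizer.pt",
)
print(path)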
checkpoint-26010/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10ab4e11aa610a801679d44622825119a65c04d3a6548b6babf3cc95c6de2131
+size 263167661
checkpoint-26010/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e41ad35fecbf67751755af929347771f4c35b3dbceb2dc1eccc86ba2fd9dfa9
+size 14575
checkpoint-26010/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fb9ae8bd8541e4af82f8f18ab99d1faa512f39ee9815455b6b0628bae91a193
+size 627
checkpoint-26010/trainer_state.json
ADDED
The diff for this file is too large to render.
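
trainer_state.json records the Trainer's bookkeeping (global_step, epoch, and a long log_history of per-logging-step metrics), which is why its diff is too large to render. A sketch of inspecting it, assuming a local clone:

import json

# Standard transformers Trainer state: global_step, epoch, and
# log_history (loss / learning_rate entries per logging step).
with open("checkpoint-26010/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])
print(state["log_history"][-1])  # most recent logged metrics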
checkpoint-26010/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:692efe535ef835a02d6284d6d738b9b71f2415882953fc7c785dc1e433e248bc
+size 3579
checkpoint-52020/config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "_name_or_path": "distilbert-base-cased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "id2label": {
+    "0": "NOT ENGAGING TITLE",
+    "1": "ENGAGING TITLE"
+  },
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "output_past": true,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.28.1",
+  "vocab_size": 28996
+}
checkpoint-52020/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8ce2a992646091c8a59b97965f9f397dde6dbfc173b614a887015e7482c426c
+size 526325317
checkpoint-52020/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8dd1a014aef256c970fb55aa424104d5e3e400376dadc1fd956c760e6ec80a0
+size 263167661
checkpoint-52020/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16aea2f9754bcf08d1c9a78455ff1c482945abef0e8c2a4d3a4dffd2e8930447
+size 14575
checkpoint-52020/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6255e4ddbc7a5dc3fee5cf98ae792ed10d638ee588abdc968975e2ee3ac4dce6
+size 627
checkpoint-52020/trainer_state.json
ADDED
The diff for this file is too large to render.
checkpoint-52020/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:692efe535ef835a02d6284d6d738b9b71f2415882953fc7c785dc1e433e248bc
+size 3579
checkpoint-78030/config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "_name_or_path": "distilbert-base-cased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "id2label": {
+    "0": "NOT ENGAGING TITLE",
+    "1": "ENGAGING TITLE"
+  },
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "output_past": true,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.28.1",
+  "vocab_size": 28996
+}
checkpoint-78030/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26091939566177b6fcdc95a00a6861e2390cf96ae1786d97c96200fb7c661def
+size 526325509
checkpoint-78030/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0d2f7e7832eacb2c733fe751415d81fc9ce6b3591f406f039ffd34754666237
+size 263167661
checkpoint-78030/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7707fe9127020c3c75c5db0abfd90589dc0302d02cf57c15b3c9cb7da28f898d
+size 14575
checkpoint-78030/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b34aaf4fe8c69925f75aacd0a07d43dd898c7bcd7d96e9599a22818ee52cd0c4
+size 627
checkpoint-78030/trainer_state.json
ADDED
The diff for this file is too large to render.
checkpoint-78030/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:692efe535ef835a02d6284d6d738b9b71f2415882953fc7c785dc1e433e248bc
+size 3579
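
The three checkpoints are evenly spaced (26010, 52020, 78030 steps), consistent with one save per epoch over three epochs. Because each directory carries optimizer.pt, scheduler.pt, and rng_state.pth alongside the weights, training can be resumed exactly. A minimal sketch, where the TrainingArguments values and train_ds are placeholders (the real arguments are serialized in training_args.bin, and the training dataset is not part of this repo):

from transformers import (AutoModelForSequenceClassification, Trainer,
                          TrainingArguments)

# Placeholders: the original run's arguments live in training_args.bin;
# train_ds stands in for the original, unpublished training dataset.
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-cased", num_labels=2)
args = TrainingArguments(output_dir=".", num_train_epochs=3)

trainer = Trainer(model=model, args=args, train_dataset=train_ds)
trainer.train(resume_from_checkpoint="checkpoint-78030")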
config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "_name_or_path": "distilbert-base-cased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "id2label": {
+    "0": "NOT ENGAGING TITLE",
+    "1": "ENGAGING TITLE"
+  },
+  "initializer_range": 0.02,
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "output_past": true,
+  "pad_token_id": 0,
+  "problem_type": "single_label_classification",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.28.1",
+  "vocab_size": 28996
+}
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0d2f7e7832eacb2c733fe751415d81fc9ce6b3591f406f039ffd34754666237
+size 263167661
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:692efe535ef835a02d6284d6d738b9b71f2415882953fc7c785dc1e433e248bc
+size 3579
vocab.txt
ADDED
The diff for this file is too large to render.