EC2 Default User committed on
Commit 3458e96
Parent: 6b8e815
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ language:
+ - en
+ license: cc-by-4.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - squad_v2
+ - mit_movie
+ model_index:
+ - name: bert-large-uncased-whole-word-masking-squad2-with-ner-mit-movie-with-neg-with-repeat
+   results:
+   - task:
+       name: Token Classification
+       type: token-classification
+     dataset:
+       name: squad_v2
+       type: squad_v2
+   - task:
+       name: Token Classification
+       type: token-classification
+     dataset:
+       name: mit_movie
+       type: mit_movie
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # bert-large-uncased-whole-word-masking-squad2-with-ner-mit-movie-with-neg-with-repeat
+
+ This model is a fine-tuned version of [deepset/bert-large-uncased-whole-word-masking-squad2](https://huggingface.co/deepset/bert-large-uncased-whole-word-masking-squad2) on the squad_v2 and mit_movie datasets.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 4
+ - eval_batch_size: 1
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.8.2
+ - Pytorch 1.8.1+cu111
+ - Datasets 1.8.0
+ - Tokenizers 0.10.3
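The hyperparameter list in the card above maps roughly onto Transformers `TrainingArguments`; a minimal sketch, assuming Transformers 4.8-era defaults, with `output_dir` chosen purely for illustration since the actual training script is not part of this commit:

```python
from transformers import TrainingArguments

# Minimal sketch of the card's hyperparameters; output_dir is hypothetical.
# The Adam betas (0.9, 0.999) and epsilon 1e-08 reported in the card are the
# library defaults, so they need no explicit override here.
training_args = TrainingArguments(
    output_dir="./bert-large-uncased-wwm-squad2-ner-mit-movie",  # hypothetical path
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=1,
    seed=42,
    gradient_accumulation_steps=4,  # 4 x 4 = total train batch size of 16
    lr_scheduler_type="linear",
    num_train_epochs=5,
)
```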
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 5.0,
+     "train_loss": 0.12010228272513258,
+     "train_runtime": 102657.6594,
+     "train_samples": 54577,
+     "train_samples_per_second": 2.658,
+     "train_steps_per_second": 0.166
+ }
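The reported throughput is internally consistent: 54,577 training samples × 5 epochs ÷ 102,657.66 s ≈ 2.658 samples/s, and 2.658 ÷ 16 (the total train batch size from the card) ≈ 0.166 steps/s.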
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+     "_name_or_path": "deepset/bert-large-uncased-whole-word-masking-squad2",
+     "architectures": [
+         "BertForQuestionAnswering"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "finetuning_task": "squad2",
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 1024,
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "language": "english",
+     "layer_norm_eps": 1e-12,
+     "max_position_embeddings": 512,
+     "model_type": "bert",
+     "name": "Bert",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "output_past": true,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "transformers_version": "4.8.2",
+     "type_vocab_size": 2,
+     "use_cache": true,
+     "vocab_size": 30522
+ }
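Since `config.json` registers the checkpoint as `BertForQuestionAnswering`, inference goes through the question-answering classes; a minimal sketch, using a hypothetical repo id (substitute the actual Hub path):

```python
from transformers import pipeline

# Hypothetical repo id for illustration; replace with the real Hub path.
model_id = "bert-large-uncased-whole-word-masking-squad2-with-ner-mit-movie-with-neg-with-repeat"

# The card frames token classification (NER over MIT Movie) as extractive QA,
# so entities are recovered by asking questions against the input text.
qa = pipeline("question-answering", model=model_id, tokenizer=model_id)
print(qa(question="Who is the actor?",
         context="show me movies starring tom hanks from the nineties"))
```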
log.log ADDED
@@ -0,0 +1,6 @@
+ Training dataset length:
+ 54577
+ Validation dataset length:
+ 5616
+ Test dataset length:
+ 14136
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86a91b35c37c5bb0f381b10882b4d9256c79a5650107e9281f4977fd47a2a5d2
+ size 1336547639
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": "/home/ec2-user/.cache/huggingface/transformers/ef9bc2ba6867b86932babe745cbccc9822ae43e5c873d03088aee4251bd45fff.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d", "name_or_path": "deepset/bert-large-uncased-whole-word-masking-squad2", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
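The `tokenizer_config.json` above pins `BertTokenizer` with `do_lower_case: true`; a minimal sketch of loading it from a local clone of this repo (the directory path is an assumption):

```python
from transformers import BertTokenizer

# Assumes the repo has been cloned locally; "." stands in for its path.
# do_lower_case=true means input text is lowercased before WordPiece
# tokenization against vocab.txt.
tokenizer = BertTokenizer.from_pretrained(".")
print(tokenizer.tokenize("Movie Night"))  # -> ['movie', 'night']
```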
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 5.0,
+     "train_loss": 0.12010228272513258,
+     "train_runtime": 102657.6594,
+     "train_samples": 54577,
+     "train_samples_per_second": 2.658,
+     "train_steps_per_second": 0.166
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77b39851044d01247985ad45533a84beb96f5706e0b33d95ae8426a6d579e457
+ size 2927
vocab.txt ADDED
The diff for this file is too large to render. See raw diff