Commit 270b51c by Vui Seng Chua (1 parent: f9a5f98)

add content
README.md ADDED
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: run05-roberta-large-squadv1.1-sl384-ds128-e2-tbs16
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# run05-roberta-large-squadv1.1-sl384-ds128-e2-tbs16

This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the SQuAD v1.1 dataset.
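
As a quick usage sketch (not part of the auto-generated card): the checkpoint can be loaded with the `transformers` question-answering pipeline. The repo id `vuiseng9/roberta-l-squadv1.1` is assumed from the Eval section below.

```python
from transformers import pipeline

# Repo id taken from the Eval section of this card; adjust if it differs.
qa = pipeline("question-answering", model="vuiseng9/roberta-l-squadv1.1")

result = qa(
    question="What was roberta-large fine-tuned on?",
    context="This model is roberta-large fine-tuned on SQuAD v1.1 for extractive QA.",
)
print(result["answer"], result["score"])
```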

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 16
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2.0
- mixed_precision_training: Native AMP
### Training results

After 2 epochs the model reaches 88.46 exact match and 94.31 F1 on the SQuAD v1.1 validation set (see all_results.json and eval_results.json in this commit).

### Framework versions

- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1

# Train
```bash
# RUNID and OUTDIR must be set in the environment before launching.
python run_qa.py \
  --model_name_or_path roberta-large \
  --dataset_name squad \
  --do_eval \
  --do_train \
  --evaluation_strategy steps \
  --eval_steps 500 \
  --learning_rate 3e-5 \
  --fp16 \
  --num_train_epochs 2 \
  --per_device_eval_batch_size 64 \
  --per_device_train_batch_size 16 \
  --max_seq_length 384 \
  --doc_stride 128 \
  --save_steps 1000 \
  --logging_steps 1 \
  --overwrite_output_dir \
  --run_name $RUNID \
  --output_dir $OUTDIR
```

# Eval
```bash
export CUDA_VISIBLE_DEVICES=0

MODEL=vuiseng9/roberta-l-squadv1.1
OUTDIR=eval-$(basename $MODEL)
WORKDIR=transformers/examples/pytorch/question-answering
cd $WORKDIR
mkdir -p $OUTDIR  # tee writes here immediately, before run_qa.py creates the directory

nohup python run_qa.py \
  --model_name_or_path $MODEL \
  --dataset_name squad \
  --do_eval \
  --per_device_eval_batch_size 16 \
  --max_seq_length 384 \
  --doc_stride 128 \
  --overwrite_output_dir \
  --output_dir $OUTDIR 2>&1 | tee $OUTDIR/run.log &
```
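
A hedged sketch (not from the original card) for re-scoring the committed eval_predictions.json against the SQuAD v1.1 validation split. It assumes run_qa.py's output format, a `{question_id: answer_text}` mapping, and uses `load_metric`, which is still available in the pinned Datasets 2.1.0.

```python
import json

from datasets import load_dataset, load_metric

# eval_predictions.json (committed in this repo) maps question ids to answer strings.
with open("eval_predictions.json") as f:
    preds = json.load(f)

validation = load_dataset("squad", split="validation")
metric = load_metric("squad")

predictions = [{"id": qid, "prediction_text": text} for qid, text in preds.items()]
references = [{"id": ex["id"], "answers": ex["answers"]} for ex in validation]

# Expect roughly {'exact_match': 88.46, 'f1': 94.31}, matching eval_results.json.
print(metric.compute(predictions=predictions, references=references))
```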
all_results.json ADDED
{
  "epoch": 2.0,
  "eval_exact_match": 88.45789971617786,
  "eval_f1": 94.30632459083219,
  "eval_samples": 10790,
  "train_loss": 0.7616665850008564,
  "train_runtime": 4169.2181,
  "train_samples": 88568,
  "train_samples_per_second": 42.487,
  "train_steps_per_second": 2.656
}
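
As a quick sanity check (not part of the commit), the throughput figures above follow from the other fields plus the train batch size of 16 from the model card:

```python
# Reproducing all_results.json's throughput numbers from its other fields.
train_samples, epochs, batch_size, runtime = 88568, 2, 16, 4169.2181

samples_per_sec = train_samples * epochs / runtime
steps_per_epoch = -(-train_samples // batch_size)  # ceil(88568 / 16) = 5536
steps_per_sec = steps_per_epoch * epochs / runtime

print(round(samples_per_sec, 3))  # 42.487 -> train_samples_per_second
print(round(steps_per_sec, 3))    # 2.656  -> train_steps_per_second
```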
config.json ADDED
{
  "_name_or_path": "roberta-large",
  "architectures": [
    "RobertaForQuestionAnswering"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.18.0",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 50265
}
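
A hedged sketch (repo id assumed from the README's Eval section) for inspecting this config through the transformers API:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("vuiseng9/roberta-l-squadv1.1")

assert config.model_type == "roberta"
assert config.hidden_size == 1024 and config.num_hidden_layers == 24  # roberta-large sizes
print(config.architectures)  # ['RobertaForQuestionAnswering']
```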
eval_predictions.json ADDED
(file contents too large to render)
eval_results.json ADDED
{
  "epoch": 2.0,
  "eval_exact_match": 88.45789971617786,
  "eval_f1": 94.30632459083219,
  "eval_samples": 10790
}
merges.txt ADDED
(file contents too large to render)
pytorch_model.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:885be04aa71771d049d12c10423508ecdb4b6443415ea3443596812995bb826b
size 1417385329
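
The entry above is a Git LFS pointer, not the weights themselves. A hedged sketch (repo id again assumed from the Eval section) for fetching the ~1.4 GB checkpoint without git-lfs:

```python
from huggingface_hub import hf_hub_download

# Downloads and caches the actual weights that the LFS pointer refers to.
path = hf_hub_download(
    repo_id="vuiseng9/roberta-l-squadv1.1",
    filename="pytorch_model.bin",
)
print(path)
```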
special_tokens_map.json ADDED
{
  "bos_token": "<s>",
  "eos_token": "</s>",
  "unk_token": "<unk>",
  "sep_token": "</s>",
  "pad_token": "<pad>",
  "cls_token": "<s>",
  "mask_token": {
    "content": "<mask>",
    "single_word": false,
    "lstrip": true,
    "rstrip": false,
    "normalized": false
  }
}
tokenizer.json ADDED
(file contents too large to render)
tokenizer_config.json ADDED
{
  "errors": "replace",
  "bos_token": "<s>",
  "eos_token": "</s>",
  "sep_token": "</s>",
  "cls_token": "<s>",
  "unk_token": "<unk>",
  "pad_token": "<pad>",
  "mask_token": "<mask>",
  "add_prefix_space": false,
  "trim_offsets": true,
  "model_max_length": 512,
  "special_tokens_map_file": null,
  "name_or_path": "roberta-large",
  "tokenizer_class": "RobertaTokenizer"
}
train_results.json ADDED
{
  "epoch": 2.0,
  "train_loss": 0.7616665850008564,
  "train_runtime": 4169.2181,
  "train_samples": 88568,
  "train_samples_per_second": 42.487,
  "train_steps_per_second": 2.656
}
trainer_state.json ADDED
(file contents too large to render)
training_args.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:39147a6bac51bae479afda16858d1c769d5ca3bc514ba0589435908261d8c04a
size 3183
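
training_args.bin is a pickled TrainingArguments object. With the pinned Pytorch 1.11, a hedged way to inspect it (only for a trusted source, since it goes through pickle):

```python
import torch

# Unpickles the Trainer's TrainingArguments; requires transformers to be installed.
args = torch.load("training_args.bin")
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```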
vocab.json ADDED
(file contents too large to render)