Edward Gow-Smith committed
Commit 4a54a80 (parent: 1a3d4ac)

added model
all_results.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "epoch": 9.0,
+   "eval_accuracy": 0.7639485001564026,
+   "eval_f1": 0.7916666666666666,
+   "eval_loss": 1.6960338354110718,
+   "eval_mem_cpu_alloc_delta": 4096,
+   "eval_mem_cpu_peaked_delta": 0,
+   "eval_mem_gpu_alloc_delta": 0,
+   "eval_mem_gpu_peaked_delta": 97849856,
+   "eval_runtime": 7.2492,
+   "eval_samples": 466,
+   "eval_samples_per_second": 64.283,
+   "init_mem_cpu_alloc_delta": 899153920,
+   "init_mem_cpu_peaked_delta": 464109568,
+   "init_mem_gpu_alloc_delta": 469504512,
+   "init_mem_gpu_peaked_delta": 0,
+   "train_mem_cpu_alloc_delta": 501301248,
+   "train_mem_cpu_peaked_delta": 0,
+   "train_mem_gpu_alloc_delta": 1454685696,
+   "train_mem_gpu_peaked_delta": 4411427840,
+   "train_runtime": 1293.0923,
+   "train_samples": 3327,
+   "train_samples_per_second": 0.724
+ }
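
all_results.json is the flat metrics dictionary the Hugging Face Trainer writes at the end of a run. A minimal sketch of reading it back, e.g. to compare runs (standard-library JSON only; the relative path assumes you are inside a clone of this repo):

    import json

    # Load the aggregated train/eval metrics written by the Trainer.
    with open("all_results.json") as f:
        metrics = json.load(f)

    print(f"epoch {metrics['epoch']}: "
          f"accuracy={metrics['eval_accuracy']:.4f}, "
          f"F1={metrics['eval_f1']:.4f}, "
          f"eval loss={metrics['eval_loss']:.4f}")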
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "_name_or_path": "bert-base-cased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.6.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 28996
+ }
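
config.json is a standard bert-base-cased configuration with a BertForSequenceClassification head. A minimal sketch of loading this checkpoint with the transformers auto classes; the path below is a placeholder for a local clone of this repository, not its actual hub id:

    from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

    checkpoint = "./path-to-this-repo"  # placeholder: local clone or hub id

    config = AutoConfig.from_pretrained(checkpoint)        # reads config.json
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)  # reads the tokenizer files
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)  # reads pytorch_model.bin

    print(config.model_type, config.num_hidden_layers, config.vocab_size)  # bert 12 28996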
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 9.0,
+   "eval_accuracy": 0.7639485001564026,
+   "eval_f1": 0.7916666666666666,
+   "eval_loss": 1.6960338354110718,
+   "eval_runtime": 7.2492,
+   "eval_samples": 466,
+   "eval_samples_per_second": 64.283
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
output ADDED
@@ -0,0 +1,9 @@
+ 04/26/2021 16:14:50 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+ 04/26/2021 16:14:50 - INFO - __main__ - Training/evaluation parameters TrainingArguments(output_dir=results/, overwrite_output_dir=True, do_train=True, do_eval=True, do_predict=False, evaluation_strategy=IntervalStrategy.EPOCH, prediction_loss_only=False, per_device_train_batch_size=32, per_device_eval_batch_size=8, gradient_accumulation_steps=1, eval_accumulation_steps=None, learning_rate=2e-05, weight_decay=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=2.0, max_steps=-1, lr_scheduler_type=SchedulerType.LINEAR, warmup_ratio=0.0, warmup_steps=0, logging_dir=runs/Apr26_16-14-48_sharc-node100.shef.ac.uk, logging_strategy=IntervalStrategy.STEPS, logging_first_step=False, logging_steps=500, save_strategy=IntervalStrategy.STEPS, save_steps=500, save_total_limit=None, no_cuda=False, seed=0, fp16=False, fp16_opt_level=O1, fp16_backend=auto, fp16_full_eval=False, local_rank=-1, tpu_num_cores=None, tpu_metrics_debug=False, debug=False, dataloader_drop_last=False, eval_steps=500, dataloader_num_workers=0, past_index=-1, run_name=results/, disable_tqdm=False, remove_unused_columns=True, label_names=None, load_best_model_at_end=False, metric_for_best_model=None, greater_is_better=None, ignore_data_skip=False, sharded_ddp=[], deepspeed=None, label_smoothing_factor=0.0, adafactor=False, group_by_length=False, length_column_name=length, report_to=[], ddp_find_unused_parameters=None, dataloader_pin_memory=True, skip_memory_metrics=False, use_legacy_prediction_loop=False, push_to_hub=False, _n_gpu=1, mp_parameters=)
+ 04/26/2021 16:14:50 - INFO - __main__ - load a local file for train: EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv
+ 04/26/2021 16:14:50 - INFO - __main__ - load a local file for validation: EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv
+ 04/26/2021 16:14:51 - WARNING - datasets.builder - Using custom data configuration default-e6021d2b98beaf56
+ 04/26/2021 16:14:51 - WARNING - datasets.builder - Reusing dataset csv (/home/acp20eg/.cache/huggingface/datasets/csv/default-e6021d2b98beaf56/0.0.0/2dc6629a9ff6b5697d82c25b73731dd440507a69cbce8b425db50b751e8fcfd0)
+ 04/26/2021 16:14:58 - INFO - __main__ - Sample 1577 of the training set: {'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'input_ids': [101, 1457, 8517, 3259, 1110, 1907, 1106, 2824, 1105, 5118, 117, 9133, 117, 4417, 1113, 4555, 1120, 9786, 3460, 117, 9786, 117, 159, 4867, 1358, 117, 7986, 6060, 117, 12145, 117, 7673, 159, 15609, 3294, 119, 102, 188, 21365, 1480, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'label': 1, 'sentence1': 'Stag Night is available to watch and stream, download, buy on demand at Amazon Prime, Amazon, Vudu, Google Play, iTunes, YouTube VOD online.', 'sentence2': 'stag night', 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}.
+ 04/26/2021 16:14:58 - INFO - __main__ - Sample 3104 of the training set: {'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'input_ids': [101, 146, 12647, 15554, 1181, 139, 26859, 787, 188, 6134, 1106, 1231, 25665, 1103, 1291, 3225, 6534, 1105, 1106, 1840, 170, 1362, 3511, 1113, 4530, 1849, 1272, 117, 1111, 3451, 2255, 117, 1103, 4530, 1110, 4787, 119, 102, 1362, 3511, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'label': 1, 'sentence1': 'I applaud Biden’s decisions to rejoin the World Health Organization and to call a world conference on climate change because, for whatever reason, the climate is changing.', 'sentence2': 'world conference', 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}.
+ 04/26/2021 16:14:58 - INFO - __main__ - Sample 1722 of the training set: {'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'input_ids': [101, 3291, 15789, 27608, 6270, 131, 3291, 18312, 1569, 1844, 1933, 1209, 2025, 3154, 1104, 8999, 17157, 102, 1844, 1933, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'label': 1, 'sentence1': 'Coronavirus latest: Covid national research project will study effects of emerging mutations', 'sentence2': 'research project', 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}.
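
The second log line above records the full TrainingArguments for the run. A minimal sketch re-creating the key settings it reports (values copied from the log, which corresponds to transformers 4.6.0.dev0; the training script itself is not part of this commit):

    from transformers import TrainingArguments

    # Key values taken from the logged TrainingArguments above; other
    # fields are left at their defaults.
    training_args = TrainingArguments(
        output_dir="results/",
        overwrite_output_dir=True,
        do_train=True,
        do_eval=True,
        evaluation_strategy="epoch",
        per_device_train_batch_size=32,
        per_device_eval_batch_size=8,
        learning_rate=2e-5,
        num_train_epochs=2.0,
        seed=0,
    )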
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:788923048c367ab4bc314eaf0b6d3f1e145c3e8ff99c52dc5275ac885f3868c6
+ size 433336585
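
pytorch_model.bin is committed as a Git LFS pointer; the ~433 MB weight file itself lives in LFS storage and is fetched on clone/pull (or transparently by from_pretrained). A small illustrative sketch of checking a fetched copy against the size and sha256 recorded in the pointer above:

    import hashlib
    from pathlib import Path

    # Digest and size copied from the LFS pointer above.
    EXPECTED_SHA256 = "788923048c367ab4bc314eaf0b6d3f1e145c3e8ff99c52dc5275ac885f3868c6"
    EXPECTED_SIZE = 433336585

    path = Path("pytorch_model.bin")
    assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"

    sha = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
    assert sha.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
    print("pytorch_model.bin matches its LFS pointer")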
results.txt ADDED
@@ -0,0 +1,52 @@
+ {"results": [{"epoch": 2.0, "eval_accuracy": 0.7510729432106018, "eval_f1": 0.7716535433070866}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.770386278629303, "eval_f1": 0.7961904761904762}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7854077219963074, "eval_f1": 0.8134328358208955}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7639485001564026, "eval_f1": 0.7916666666666666}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7596566677093506, "eval_f1": 0.7894736842105263}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7381974458694458, "eval_f1": 0.7773722627737226}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.770386278629303, "eval_f1": 0.8051001821493625}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.774678111076355, "eval_f1": 0.8073394495412844}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7575107216835022, "eval_f1": 0.79491833030853}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.770386278629303, "eval_f1": 0.8022181146025879}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.766094446182251, "eval_f1": 0.8057040998217468}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/TrueTrue-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.729613721370697, "eval_f1": 0.7797202797202797}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.716738224029541, "eval_f1": 0.7716262975778547}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.721030056476593, "eval_f1": 0.7719298245614035}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.721030056476593, "eval_f1": 0.7703180212014135}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.729613721370697, "eval_f1": 0.7871621621621622}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseFalse-0/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7043478488922119, "eval_f1": 0.6890243902439024}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.6608695387840271, "eval_f1": 0.6465256797583081}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7130434513092041, "eval_f1": 0.7211267605633803}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.6985507011413574, "eval_f1": 0.7011494252873564}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.686956524848938, "eval_f1": 0.6804733727810651}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-2/dev.csv", "model_name": "bert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7060086131095886, "eval_f1": 0.74487895716946}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-uncased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7124463319778442, "eval_f1": 0.7572463768115942}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-uncased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7489270567893982, "eval_f1": 0.7710371819960861}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-uncased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.716738224029541, "eval_f1": 0.7509433962264151}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-uncased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7403433322906494, "eval_f1": 0.7603960396039604}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-uncased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.774678111076355, "eval_f1": 0.8128342245989305}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "roberta-base"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.8025751113891602, "eval_f1": 0.8385964912280702}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "roberta-base"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7768240571022034, "eval_f1": 0.8115942028985508}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "roberta-base"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7725321650505066, "eval_f1": 0.8093525179856115}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "roberta-base"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.770386278629303, "eval_f1": 0.8085867620751341}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "roberta-base"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.774678111076355, "eval_f1": 0.8148148148148148}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "xlnet-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7982832789421082, "eval_f1": 0.8373702422145328}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "xlnet-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7875536680221558, "eval_f1": 0.8190127970749543}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "xlnet-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7875536680221558, "eval_f1": 0.8272251308900523}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "xlnet-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7939913868904114, "eval_f1": 0.8309859154929577}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "xlnet-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7317596673965454, "eval_f1": 0.7680890538033395}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "distilbert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.725321888923645, "eval_f1": 0.762962962962963}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "distilbert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7274678349494934, "eval_f1": 0.7678244972577697}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "distilbert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7317596673965454, "eval_f1": 0.7680890538033395}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "distilbert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7446351647377014, "eval_f1": 0.7840290381125227}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "distilbert-base-cased"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.954935610294342, "eval_f1": 0.9630931458699473}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.9442059993743896, "eval_f1": 0.954225352112676}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.95064377784729, "eval_f1": 0.9591474245115453}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.954935610294342, "eval_f1": 0.9630931458699473}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.9399141669273376, "eval_f1": 0.9513888888888888}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.8948497772216797, "eval_f1": 0.9165247018739353}], "seed": 0, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev_short.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.8776823878288269, "eval_f1": 0.9042016806722689}], "seed": 1, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev_short.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.8690987229347229, "eval_f1": 0.8967851099830795}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev_short.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.8991416096687317, "eval_f1": 0.9176882661996497}], "seed": 3, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev_short.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.8991416096687317, "eval_f1": 0.9185441941074524}], "seed": 4, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train_from_dev_short.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "results/xlnet-base-cased-FalseTrue0-1"}
+ {"results": [{"epoch": 9.0, "eval_accuracy": 0.7639485001564026, "eval_f1": 0.7916666666666666}], "seed": 2, "train_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/train.csv", "dev_path": "EMNLP-2021/data/v3/SentenceClassificationData/FalseTrue-0/dev.csv", "model_name": "bert-base-cased"}
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f8c1c0bc2854d1af911a8550288c1258af5ba50277f3a5c829b98eb86fc5646
+ size 798011
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-cased"}
train_results.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "epoch": 9.0,
+   "train_runtime": 1293.0923,
+   "train_samples": 3327,
+   "train_samples_per_second": 0.724
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcf79799b0ce331e472be6a3964f5690ede1c85ab943668b732647a8ef137b32
+ size 2415
vocab.json ADDED
The diff for this file is too large to render. See raw diff
vocab.txt ADDED
The diff for this file is too large to render. See raw diff