clhuang committed
Commit 52b6d4c
1 Parent(s): 66c789c

update from clhuang

optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:09f484ecf330695cf604ab9c5ab2ae7161156d9423cc36c594fd92334287e322
- size 84466563

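The deleted .pt/.pth/.bin files in this commit are Git LFS pointer stubs, not the binaries themselves: each records the LFS spec version, the SHA-256 of the stored blob, and its size in bytes. A minimal sketch of reading one such pointer, with `parse_lfs_pointer` as a hypothetical helper rather than part of git-lfs tooling:

```python
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split on the first space only,
    # since the oid value itself contains no spaces but the URL does not
    # need further splitting either.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"],          # e.g. "sha256:09f484ec..."
        "size": int(fields["size"]),   # blob size in bytes (~84 MB here)
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:09f484ecf330695cf604ab9c5ab2ae7161156d9423cc36c594fd92334287e322
size 84466563
"""
print(parse_lfs_pointer(pointer))
```
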
rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:cfbafeb8298acbb83505265c84391943881508a522717de6681e875a516d7cdb
- size 14503

scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a40ef085858cb95701334037a0232e17223889497e5e8d68b6d06c5b178967bb
- size 623

special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "do_basic_tokenize": true, "never_split": null, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-chinese", "tokenizer_class": "BertTokenizer"}
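Together with vocab.txt (added at the end of this commit), these two files are what BertTokenizer needs at load time. A minimal sketch, assuming the Hugging Face transformers library; "./checkpoint" is a placeholder for a local clone of this repo:

```python
from transformers import BertTokenizer

# from_pretrained reads vocab.txt, tokenizer_config.json, and
# special_tokens_map.json from the given directory.
tokenizer = BertTokenizer.from_pretrained("./checkpoint")

# Special tokens as declared in special_tokens_map.json above.
print(tokenizer.unk_token, tokenizer.sep_token, tokenizer.pad_token,
      tokenizer.cls_token, tokenizer.mask_token)

# tokenizer_config.json keeps case (do_lower_case=false) and caps
# sequences at model_max_length=512, matching bert-base-chinese.
enc = tokenizer("你好，世界", truncation=True, max_length=512)
print(enc["input_ids"])
```
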
trainer_state.json DELETED
@@ -1,82 +0,0 @@
- {
-   "best_metric": 0.2616243362426758,
-   "best_model_checkpoint": "./checkpoints/checkpoint-1964",
-   "epoch": 4.0,
-   "global_step": 1964,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {
-       "epoch": 1.0,
-       "eval_accuracy": 0.8884644766997708,
-       "eval_f1": 0.8884644766997708,
-       "eval_loss": 0.4412623643875122,
-       "eval_precision": 0.8884644766997708,
-       "eval_recall": 0.8884644766997708,
-       "eval_runtime": 43.6404,
-       "eval_samples_per_second": 59.99,
-       "eval_steps_per_second": 1.879,
-       "step": 491
-     },
-     {
-       "epoch": 1.02,
-       "learning_rate": 2e-05,
-       "loss": 1.3111,
-       "step": 500
-     },
-     {
-       "epoch": 2.0,
-       "eval_accuracy": 0.9037433155080213,
-       "eval_f1": 0.9037433155080212,
-       "eval_loss": 0.31344300508499146,
-       "eval_precision": 0.9037433155080213,
-       "eval_recall": 0.9037433155080213,
-       "eval_runtime": 43.7135,
-       "eval_samples_per_second": 59.89,
-       "eval_steps_per_second": 1.876,
-       "step": 982
-     },
-     {
-       "epoch": 2.04,
-       "learning_rate": 1.591169255928046e-05,
-       "loss": 0.3547,
-       "step": 1000
-     },
-     {
-       "epoch": 3.0,
-       "eval_accuracy": 0.9113827349121467,
-       "eval_f1": 0.9113827349121467,
-       "eval_loss": 0.2822723388671875,
-       "eval_precision": 0.9113827349121467,
-       "eval_recall": 0.9113827349121467,
-       "eval_runtime": 43.7763,
-       "eval_samples_per_second": 59.804,
-       "eval_steps_per_second": 1.873,
-       "step": 1473
-     },
-     {
-       "epoch": 3.05,
-       "learning_rate": 1.1823385118560918e-05,
-       "loss": 0.2375,
-       "step": 1500
-     },
-     {
-       "epoch": 4.0,
-       "eval_accuracy": 0.9182582123758595,
-       "eval_f1": 0.9182582123758595,
-       "eval_loss": 0.2616243362426758,
-       "eval_precision": 0.9182582123758595,
-       "eval_recall": 0.9182582123758595,
-       "eval_runtime": 43.7951,
-       "eval_samples_per_second": 59.778,
-       "eval_steps_per_second": 1.872,
-       "step": 1964
-     }
-   ],
-   "max_steps": 2946,
-   "num_train_epochs": 6,
-   "total_flos": 1100711158866000.0,
-   "trial_name": null,
-   "trial_params": null
- }
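The deleted trainer_state.json recorded per-epoch eval metrics and the best checkpoint (eval_loss 0.2616 at step 1964, epoch 4). A minimal sketch of inspecting such a file before removal, assuming a local copy at a placeholder path:

```python
import json

# Placeholder path; the transformers Trainer writes trainer_state.json
# alongside each checkpoint (e.g. checkpoints/checkpoint-1964/).
with open("trainer_state.json") as f:
    state = json.load(f)

# Best metric and where it was reached.
print(state["best_metric"], state["best_model_checkpoint"])

# log_history mixes eval records (eval_loss, eval_accuracy, ...) and
# train records (loss, learning_rate); keep only the eval ones.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["epoch"], entry["eval_loss"], entry["eval_accuracy"])
```
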
training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:de0b1abc9b0f344d42c820704816e67924b2521c28a82621417365ae1e1662de
- size 3183

vocab.txt ADDED
The diff for this file is too large to render. See raw diff