desonglll committed
Commit ead62c6 (0 parents)

first commit

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +59 -0
  2. README.md +87 -0
  3. all_results.json +18 -0
  4. checkpoint-1000/config.json +49 -0
  5. checkpoint-1000/model.safetensors +3 -0
  6. checkpoint-1000/optimizer.pt +3 -0
  7. checkpoint-1000/rng_state.pth +3 -0
  8. checkpoint-1000/scheduler.pt +3 -0
  9. checkpoint-1000/special_tokens_map.json +7 -0
  10. checkpoint-1000/tokenizer.json +0 -0
  11. checkpoint-1000/tokenizer_config.json +55 -0
  12. checkpoint-1000/trainer_state.json +47 -0
  13. checkpoint-1000/training_args.bin +3 -0
  14. checkpoint-1000/vocab.txt +0 -0
  15. checkpoint-1500/config.json +49 -0
  16. checkpoint-1500/model.safetensors +3 -0
  17. checkpoint-1500/optimizer.pt +3 -0
  18. checkpoint-1500/rng_state.pth +3 -0
  19. checkpoint-1500/scheduler.pt +3 -0
  20. checkpoint-1500/special_tokens_map.json +7 -0
  21. checkpoint-1500/tokenizer.json +0 -0
  22. checkpoint-1500/tokenizer_config.json +55 -0
  23. checkpoint-1500/trainer_state.json +54 -0
  24. checkpoint-1500/training_args.bin +3 -0
  25. checkpoint-1500/vocab.txt +0 -0
  26. checkpoint-2000/config.json +49 -0
  27. checkpoint-2000/model.safetensors +3 -0
  28. checkpoint-2000/optimizer.pt +3 -0
  29. checkpoint-2000/rng_state.pth +3 -0
  30. checkpoint-2000/scheduler.pt +3 -0
  31. checkpoint-2000/special_tokens_map.json +7 -0
  32. checkpoint-2000/tokenizer.json +0 -0
  33. checkpoint-2000/tokenizer_config.json +55 -0
  34. checkpoint-2000/trainer_state.json +61 -0
  35. checkpoint-2000/training_args.bin +3 -0
  36. checkpoint-2000/vocab.txt +0 -0
  37. checkpoint-2500/config.json +49 -0
  38. checkpoint-2500/model.safetensors +3 -0
  39. checkpoint-2500/optimizer.pt +3 -0
  40. checkpoint-2500/rng_state.pth +3 -0
  41. checkpoint-2500/scheduler.pt +3 -0
  42. checkpoint-2500/special_tokens_map.json +7 -0
  43. checkpoint-2500/tokenizer.json +0 -0
  44. checkpoint-2500/tokenizer_config.json +55 -0
  45. checkpoint-2500/trainer_state.json +68 -0
  46. checkpoint-2500/training_args.bin +3 -0
  47. checkpoint-2500/vocab.txt +0 -0
  48. checkpoint-3000/config.json +49 -0
  49. checkpoint-3000/model.safetensors +3 -0
  50. checkpoint-3000/optimizer.pt +3 -0
.gitattributes ADDED
@@ -0,0 +1,59 @@
+ checkpoint-1000/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1000/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1500/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1500/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2000/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2000/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2500/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2500/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3000/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3000/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3500/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3500/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4000/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4000/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4500/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4500/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-500/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-500/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5000/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5000/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5268/model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5268/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ model.safetensors filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1000/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1000/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1000/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1500/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1500/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-1500/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2000/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2000/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2000/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2500/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2500/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-2500/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3000/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3000/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3000/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3500/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3500/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-3500/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4000/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4000/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4000/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4500/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4500/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-4500/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-500/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-500/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-500/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5000/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5000/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5000/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5268/rng_state.pth filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5268/scheduler.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-5268/training_args.bin filter=lfs diff=lfs merge=lfs -text
+ runs/May26_06-25-40_c3c1060e04ec/events.out.tfevents.1716704756.c3c1060e04ec.954.0 filter=lfs diff=lfs merge=lfs -text
+ runs/May26_06-25-40_c3c1060e04ec/events.out.tfevents.1716705623.c3c1060e04ec.954.1 filter=lfs diff=lfs merge=lfs -text
+ training_args.bin filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,87 @@
+ ---
+ license: apache-2.0
+ base_model: google-bert/bert-base-uncased
+ tags:
+ - generated_from_trainer
+ datasets:
+ - conll2003
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: test-ner
+   results:
+   - task:
+       name: Token Classification
+       type: token-classification
+     dataset:
+       name: conll2003
+       type: conll2003
+       config: conll2003
+       split: validation
+       args: conll2003
+     metrics:
+     - name: Precision
+       type: precision
+       value: 0.946619812583668
+     - name: Recall
+       type: recall
+       value: 0.9520363513968361
+     - name: F1
+       type: f1
+       value: 0.9493203557643899
+     - name: Accuracy
+       type: accuracy
+       value: 0.9896226782446167
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # test-ner
+
+ This model is a fine-tuned version of [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) on the conll2003 dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0498
+ - Precision: 0.9466
+ - Recall: 0.9520
+ - F1: 0.9493
+ - Accuracy: 0.9896
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.42.0.dev0
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
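
Note: the README above gives everything needed to try the model. A minimal usage sketch, assuming the transformers library is installed; the repo id "desonglll/test-ner" is hypothetical (taken from the model-index name) and should be replaced with this repo's actual Hub id:

from transformers import pipeline

# Hypothetical repo id, based on the model-index name "test-ner" above.
ner = pipeline(
    "token-classification",
    model="desonglll/test-ner",
    aggregation_strategy="simple",  # merge B-/I- word pieces into entity spans
)
print(ner("Hugging Face is based in New York City."))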
all_results.json ADDED
@@ -0,0 +1,18 @@
+ {
+     "epoch": 3.0,
+     "eval_accuracy": 0.9896226782446167,
+     "eval_f1": 0.9493203557643899,
+     "eval_loss": 0.049828801304101944,
+     "eval_precision": 0.946619812583668,
+     "eval_recall": 0.9520363513968361,
+     "eval_runtime": 9.7904,
+     "eval_samples": 3250,
+     "eval_samples_per_second": 331.959,
+     "eval_steps_per_second": 41.572,
+     "total_flos": 891900038010780.0,
+     "train_loss": 0.043606681179258075,
+     "train_runtime": 849.8761,
+     "train_samples": 14041,
+     "train_samples_per_second": 49.564,
+     "train_steps_per_second": 6.199
+ }
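
Note: the precision/recall/F1 figures above are entity-level scores. For generated_from_trainer NER runs these are typically computed with seqeval (an assumption; the evaluation script itself is not part of this commit). A toy sketch of how such scores behave:

from seqeval.metrics import f1_score, precision_score, recall_score

# Two gold entities (PER, LOC); the prediction gets PER right but mislabels LOC as ORG.
y_true = [["B-PER", "I-PER", "O", "B-LOC"]]
y_pred = [["B-PER", "I-PER", "O", "B-ORG"]]

print(precision_score(y_true, y_pred))  # 0.5: 1 of 2 predicted entities is an exact match
print(recall_score(y_true, y_pred))     # 0.5: 1 of 2 gold entities recovered
print(f1_score(y_true, y_pred))         # 0.5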
checkpoint-1000/config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "_name_or_path": "google-bert/bert-base-uncased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "finetuning_task": "ner",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "O",
+     "1": "B-PER",
+     "2": "I-PER",
+     "3": "B-ORG",
+     "4": "I-ORG",
+     "5": "B-LOC",
+     "6": "I-LOC",
+     "7": "B-MISC",
+     "8": "I-MISC"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-LOC": 5,
+     "B-MISC": 7,
+     "B-ORG": 3,
+     "B-PER": 1,
+     "I-LOC": 6,
+     "I-MISC": 8,
+     "I-ORG": 4,
+     "I-PER": 2,
+     "O": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
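
Note: the id2label table in this config is what turns per-token class indices back into BIO tags. A minimal decoding sketch, assuming the checkpoint-1000/ directory from this commit is available locally:

import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-1000")
model = AutoModelForTokenClassification.from_pretrained("checkpoint-1000")

inputs = tokenizer("John lives in Berlin", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits               # (1, seq_len, 9) class scores
pred_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, pred_id in zip(tokens, pred_ids):
    print(token, model.config.id2label[pred_id])  # e.g. john -> B-PER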
checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8859eb08391d9ad2839d519001d76ed4b6684e26917957403a396f2251732083
+ size 435617620
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22e4065bc1fa53ddd0ef1d3246f28c9b4cf22a2623a11209bfbfd708a1bfe744
+ size 871354938
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5486c9208386957d0c7174ac934979fe9d0d54ef0b04812efba089959b0af408
+ size 14244
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5aa1c6be8c6e2d2dce7c70d0ae5c8f169db7ad1685d7af44aec127ca89e7d771
+ size 1064
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.5694760820045558,
+   "eval_steps": 500,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.2847380410022779,
+       "grad_norm": 2.7607381343841553,
+       "learning_rate": 4.525436598329537e-05,
+       "loss": 0.1616,
+       "step": 500
+     },
+     {
+       "epoch": 0.5694760820045558,
+       "grad_norm": 0.94535231590271,
+       "learning_rate": 4.050873196659074e-05,
+       "loss": 0.0728,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 5268,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 168934961106000.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
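
Note: the logged learning_rate values in this state file are consistent with the README's lr_scheduler_type: linear, decaying from the 5e-05 initial rate to zero over max_steps = 5268. A quick check:

# The two logged rates match a linear decay of the 5e-05 initial rate over 5268 steps.
initial_lr, max_steps = 5e-05, 5268
for step in (500, 1000):
    print(step, initial_lr * (1 - step / max_steps))
# step 500  -> ~4.52544e-05 (logged: 4.525436598329537e-05)
# step 1000 -> ~4.05087e-05 (logged: 4.050873196659074e-05)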
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63dfd2bc31481e060350e0c03d646ab35537d1667c980cc381c38e729511fc7a
+ size 5112
checkpoint-1000/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "_name_or_path": "google-bert/bert-base-uncased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "finetuning_task": "ner",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "O",
+     "1": "B-PER",
+     "2": "I-PER",
+     "3": "B-ORG",
+     "4": "I-ORG",
+     "5": "B-LOC",
+     "6": "I-LOC",
+     "7": "B-MISC",
+     "8": "I-MISC"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-LOC": 5,
+     "B-MISC": 7,
+     "B-ORG": 3,
+     "B-PER": 1,
+     "I-LOC": 6,
+     "I-MISC": 8,
+     "I-ORG": 4,
+     "I-PER": 2,
+     "O": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
checkpoint-1500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dddbe9ac00db89de4092c47817c6b86a8fa82a7c2ab9a5383cf4cc4afa33ce0f
+ size 435617620
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2caa95c452f6aa92c455286336a1addf23205f13f405561e9410cd06645de3a
+ size 871354938
checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34bbff53e9b22f9546e02ab21f1515bedcdaf4229641757488ca166068b3504e
+ size 14244
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1585352b864844478d94c858a87708af4c03469d44e8907d83c669b453b5654
+ size 1064
checkpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-1500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.8542141230068337,
+   "eval_steps": 500,
+   "global_step": 1500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.2847380410022779,
+       "grad_norm": 2.7607381343841553,
+       "learning_rate": 4.525436598329537e-05,
+       "loss": 0.1616,
+       "step": 500
+     },
+     {
+       "epoch": 0.5694760820045558,
+       "grad_norm": 0.94535231590271,
+       "learning_rate": 4.050873196659074e-05,
+       "loss": 0.0728,
+       "step": 1000
+     },
+     {
+       "epoch": 0.8542141230068337,
+       "grad_norm": 2.5376665592193604,
+       "learning_rate": 3.5763097949886106e-05,
+       "loss": 0.063,
+       "step": 1500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 5268,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 253845449351568.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63dfd2bc31481e060350e0c03d646ab35537d1667c980cc381c38e729511fc7a
+ size 5112
checkpoint-1500/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "_name_or_path": "google-bert/bert-base-uncased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "finetuning_task": "ner",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "O",
+     "1": "B-PER",
+     "2": "I-PER",
+     "3": "B-ORG",
+     "4": "I-ORG",
+     "5": "B-LOC",
+     "6": "I-LOC",
+     "7": "B-MISC",
+     "8": "I-MISC"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-LOC": 5,
+     "B-MISC": 7,
+     "B-ORG": 3,
+     "B-PER": 1,
+     "I-LOC": 6,
+     "I-MISC": 8,
+     "I-ORG": 4,
+     "I-PER": 2,
+     "O": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
checkpoint-2000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:181a539611492bddef2493ae28041701978557110d8a1db609157b8209117b34
+ size 435617620
checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bcf8db07e0a7c4dc767877db851f12444ee5c96b481d3fd50222cf5c4702164
+ size 871354938
checkpoint-2000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35815f9cf8e7a0d3914a589a0d9bbcc907b37f2cc88601ab0c2117bab2340fb8
+ size 14244
checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c9d598d8830ba0028be8296b6d503e067e8d4e490df6001932e48104c4d9029
+ size 1064
checkpoint-2000/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-2000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
checkpoint-2000/trainer_state.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.1389521640091116,
+   "eval_steps": 500,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.2847380410022779,
+       "grad_norm": 2.7607381343841553,
+       "learning_rate": 4.525436598329537e-05,
+       "loss": 0.1616,
+       "step": 500
+     },
+     {
+       "epoch": 0.5694760820045558,
+       "grad_norm": 0.94535231590271,
+       "learning_rate": 4.050873196659074e-05,
+       "loss": 0.0728,
+       "step": 1000
+     },
+     {
+       "epoch": 0.8542141230068337,
+       "grad_norm": 2.5376665592193604,
+       "learning_rate": 3.5763097949886106e-05,
+       "loss": 0.063,
+       "step": 1500
+     },
+     {
+       "epoch": 1.1389521640091116,
+       "grad_norm": 1.2084846496582031,
+       "learning_rate": 3.1017463933181475e-05,
+       "loss": 0.0425,
+       "step": 2000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 5268,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 339051956562216.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63dfd2bc31481e060350e0c03d646ab35537d1667c980cc381c38e729511fc7a
+ size 5112
checkpoint-2000/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2500/config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "_name_or_path": "google-bert/bert-base-uncased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "finetuning_task": "ner",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "O",
+     "1": "B-PER",
+     "2": "I-PER",
+     "3": "B-ORG",
+     "4": "I-ORG",
+     "5": "B-LOC",
+     "6": "I-LOC",
+     "7": "B-MISC",
+     "8": "I-MISC"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-LOC": 5,
+     "B-MISC": 7,
+     "B-ORG": 3,
+     "B-PER": 1,
+     "I-LOC": 6,
+     "I-MISC": 8,
+     "I-ORG": 4,
+     "I-PER": 2,
+     "O": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
checkpoint-2500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08e75305c3713d4f0ca32d70a139dcd4d263a12565737643e4b4751a884f0e3f
+ size 435617620
checkpoint-2500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a44ed3b7b70a1d855f9ec6baef66d7f950003750c55ac8b8149445d4bee597b0
+ size 871354938
checkpoint-2500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fc5936524aaca82b4b7c78f0b94793f2239e75063af0ce6a1226295512a42cb
+ size 14244
checkpoint-2500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53d07b70aee919d41805e8d2b1fbb2b2a58eb6bcf1c92c65db4f753386ace213
+ size 1064
checkpoint-2500/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
checkpoint-2500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2500/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
checkpoint-2500/trainer_state.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.4236902050113895,
+   "eval_steps": 500,
+   "global_step": 2500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.2847380410022779,
+       "grad_norm": 2.7607381343841553,
+       "learning_rate": 4.525436598329537e-05,
+       "loss": 0.1616,
+       "step": 500
+     },
+     {
+       "epoch": 0.5694760820045558,
+       "grad_norm": 0.94535231590271,
+       "learning_rate": 4.050873196659074e-05,
+       "loss": 0.0728,
+       "step": 1000
+     },
+     {
+       "epoch": 0.8542141230068337,
+       "grad_norm": 2.5376665592193604,
+       "learning_rate": 3.5763097949886106e-05,
+       "loss": 0.063,
+       "step": 1500
+     },
+     {
+       "epoch": 1.1389521640091116,
+       "grad_norm": 1.2084846496582031,
+       "learning_rate": 3.1017463933181475e-05,
+       "loss": 0.0425,
+       "step": 2000
+     },
+     {
+       "epoch": 1.4236902050113895,
+       "grad_norm": 0.5610941648483276,
+       "learning_rate": 2.6271829916476843e-05,
+       "loss": 0.0298,
+       "step": 2500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 5268,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 422500723573320.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63dfd2bc31481e060350e0c03d646ab35537d1667c980cc381c38e729511fc7a
+ size 5112
checkpoint-2500/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-3000/config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "_name_or_path": "google-bert/bert-base-uncased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "finetuning_task": "ner",
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "O",
+     "1": "B-PER",
+     "2": "I-PER",
+     "3": "B-ORG",
+     "4": "I-ORG",
+     "5": "B-LOC",
+     "6": "I-LOC",
+     "7": "B-MISC",
+     "8": "I-MISC"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-LOC": 5,
+     "B-MISC": 7,
+     "B-ORG": 3,
+     "B-PER": 1,
+     "I-LOC": 6,
+     "I-MISC": 8,
+     "I-ORG": 4,
+     "I-PER": 2,
+     "O": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
checkpoint-3000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:587aa9eab668557375b4539eed73274856418beacf970fcd5440f68fe5421bdf
+ size 435617620
checkpoint-3000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d4cc3ba9a368c14b65feffd150ee7aa16aac5c35e61bd0d704f7353de3473ce
+ size 871354938