real-jiakai committed on
Commit 20f8e26
1 Parent(s): e29858c

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ eval_nbest_predictions.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+ checkpoint-*/
+ .ipynb_checkpoints
README.md ADDED
@@ -0,0 +1,125 @@
+ ---
+ library_name: transformers
+ base_model: bert-base-chinese
+ tags:
+ - generated_from_trainer
+ datasets:
+ - cmrc2018
+ model-index:
+ - name: chinese_qa
+   results: []
+ ---
+
+ # bert-base-chinese-finetuned-cmrc2018
+
+ This model is a fine-tuned version of [bert-base-chinese](https://huggingface.co/bert-base-chinese) on the CMRC2018 (Chinese Machine Reading Comprehension) dataset.
+
+ ## Model Description
+
+ This is a BERT-based extractive question answering model for Chinese text. The model is designed to locate and extract answer spans from given contexts in response to questions.
+
+ Key Features:
+ - Base Model: bert-base-chinese
+ - Task: Extractive Question Answering
+ - Language: Chinese
+ - Training Dataset: CMRC2018
+
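+ CMRC2018 is a span-extraction dataset: each example pairs a question with a context paragraph and marks the answer span inside that context. A minimal sketch of inspecting it (the dataset ID comes from the front matter above; whether the loading script is available depends on your `datasets` version):
+
+ ```python
+ from datasets import load_dataset
+
+ # Each example has "question", "context" and "answers" (text + answer_start offsets)
+ cmrc = load_dataset("cmrc2018")
+ example = cmrc["train"][0]
+ print(example["question"], example["answers"]["text"])
+ ```
+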
+ ## Performance Metrics
+
+ Evaluation results on the test set:
+ - Exact Match: 59.708
+ - F1 Score: 60.0723
+ - Number of evaluation samples: 6,254
+ - Evaluation speed: 283.054 samples/second
+
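+ The Exact Match and F1 values above are the standard SQuAD-style span-extraction metrics. As a minimal, hedged sketch (the `evaluate` package and the illustrative IDs/strings below are assumptions, not part of this repository), they can be computed from prediction/reference pairs like this:
+
+ ```python
+ import evaluate
+
+ # SQuAD-style metric: exact match + token-overlap F1 over answer spans
+ squad_metric = evaluate.load("squad")
+
+ # Illustrative example only; the real evaluation uses the CMRC2018 predictions
+ predictions = [{"id": "q1", "prediction_text": "北京"}]
+ references = [{"id": "q1", "answers": {"text": ["北京"], "answer_start": [0]}}]
+
+ print(squad_metric.compute(predictions=predictions, references=references))
+ # e.g. {'exact_match': 100.0, 'f1': 100.0}
+ ```
+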
+ ## Intended Uses & Limitations
+
+ ### Intended Uses
+ - Chinese reading comprehension tasks
+ - Answer extraction from given documents
+ - Context-based question answering systems
+
+ ### Limitations
+ - Only supports extractive QA (cannot generate new answers)
+ - Answers must be present in the context
+ - Does not support multi-hop reasoning
+ - Cannot handle unanswerable questions
+
+ ## Training Details
+
+ ### Training Hyperparameters
+ - Learning rate: 3e-05
+ - Train batch size: 12
+ - Eval batch size: 8
+ - Seed: 42
+ - Optimizer: AdamW (betas=(0.9, 0.999), epsilon=1e-08)
+ - LR scheduler: linear
+ - Number of epochs: 5.0
+
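+ These hyperparameters correspond to a standard Hugging Face `Trainer` setup. A minimal sketch of the equivalent `TrainingArguments` (the output directory is illustrative; this approximates the original run rather than reproducing the exact training script):
+
+ ```python
+ from transformers import TrainingArguments
+
+ # Mirrors the hyperparameters listed above; AdamW with betas=(0.9, 0.999),
+ # eps=1e-8 and a linear LR schedule are the Trainer defaults.
+ training_args = TrainingArguments(
+     output_dir="chinese_qa",          # illustrative path
+     learning_rate=3e-5,
+     per_device_train_batch_size=12,
+     per_device_eval_batch_size=8,
+     num_train_epochs=5,
+     lr_scheduler_type="linear",
+     seed=42,
+ )
+ ```
+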
+ ### Training Results
+ - Training time: 892.86 seconds
+ - Training samples: 18,960
+ - Training speed: 106.175 samples/second
+ - Training loss: 0.5625
+
+ ### Framework Versions
+ - Transformers: 4.47.0.dev0
+ - PyTorch: 2.5.1+cu124
+ - Datasets: 3.1.0
+ - Tokenizers: 0.20.3
+
+ ## Usage
+
+ ```python
+ import torch
+ from transformers import AutoModelForQuestionAnswering, AutoTokenizer
+
+ # Load model and tokenizer
+ model = AutoModelForQuestionAnswering.from_pretrained("real-jiakai/bert-base-chinese-finetuned-cmrc2018")
+ tokenizer = AutoTokenizer.from_pretrained("real-jiakai/bert-base-chinese-finetuned-cmrc2018")
+
+ # Prepare inputs
+ question = "Your question in Chinese"
+ context = "Context text in Chinese"
+
+ # Tokenize inputs (offset mapping is not requested here, since extra keys
+ # such as offset_mapping cannot be passed directly to the model)
+ inputs = tokenizer(
+     question,
+     context,
+     return_tensors="pt",
+     max_length=384,
+     truncation=True,
+ )
+
+ # Get the most likely answer span and decode it back to text
+ with torch.no_grad():
+     outputs = model(**inputs)
+ start_index = outputs.start_logits.argmax(dim=-1).item()
+ end_index = outputs.end_logits.argmax(dim=-1).item()
+ answer_tokens = inputs["input_ids"][0][start_index : end_index + 1]
+ answer = tokenizer.decode(answer_tokens, skip_special_tokens=True)
+ print(answer)
+ ```
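+
+ Alternatively, a minimal sketch using the high-level `pipeline` API (same model ID as above; the question and context are placeholders):
+
+ ```python
+ from transformers import pipeline
+
+ # The question-answering pipeline handles tokenization, span selection and decoding
+ qa = pipeline("question-answering", model="real-jiakai/bert-base-chinese-finetuned-cmrc2018")
+
+ result = qa(question="Your question in Chinese", context="Context text in Chinese")
+ print(result["answer"], result["score"])
+ ```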
+
+ ## Citation
+
+ If you use this model, please cite the CMRC2018 dataset:
+
+ ```bibtex
+ @inproceedings{cui-emnlp2019-cmrc2018,
+     title = "A Span-Extraction Dataset for {C}hinese Machine Reading Comprehension",
+     author = "Cui, Yiming and
+       Liu, Ting and
+       Che, Wanxiang and
+       Xiao, Li and
+       Chen, Zhipeng and
+       Ma, Wentao and
+       Wang, Shijin and
+       Hu, Guoping",
+     booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
+     month = nov,
+     year = "2019",
+     address = "Hong Kong, China",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/D19-1600",
+     doi = "10.18653/v1/D19-1600",
+     pages = "5886--5891",
+ }
+ ```
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+     "epoch": 5.0,
+     "eval_exact_match": 59.70798384591488,
+     "eval_f1": 60.07226438260921,
+     "eval_runtime": 22.0947,
+     "eval_samples": 6254,
+     "eval_samples_per_second": 283.054,
+     "eval_steps_per_second": 35.393,
+     "total_flos": 1.85781994039296e+16,
+     "train_loss": 0.5625110177148747,
+     "train_runtime": 892.8635,
+     "train_samples": 18960,
+     "train_samples_per_second": 106.175,
+     "train_steps_per_second": 8.848
+ }
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "bert-base-chinese",
+   "architectures": [
+     "BertForQuestionAnswering"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.47.0.dev0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 21128
+ }
eval_nbest_predictions.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bd8518d93bc9911d4827c78f194a9c50a9b54db76f917f40c4cfd92a7eab4fc
+ size 15944496
eval_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 5.0,
+     "eval_exact_match": 59.70798384591488,
+     "eval_f1": 60.07226438260921,
+     "eval_runtime": 22.0947,
+     "eval_samples": 6254,
+     "eval_samples_per_second": 283.054,
+     "eval_steps_per_second": 35.393
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e90f2d2ba5c13181eaac2f1df4286544f64c846a0715733e34c4818e91d18fe8
+ size 406737680
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 5.0,
+     "total_flos": 1.85781994039296e+16,
+     "train_loss": 0.5625110177148747,
+     "train_runtime": 892.8635,
+     "train_samples": 18960,
+     "train_samples_per_second": 106.175,
+     "train_steps_per_second": 8.848
+ }
trainer_state.json ADDED
@@ -0,0 +1,147 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 7900,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.31645569620253167,
+       "grad_norm": 16.740053176879883,
+       "learning_rate": 2.8101265822784812e-05,
+       "loss": 1.7983,
+       "step": 500
+     },
+     {
+       "epoch": 0.6329113924050633,
+       "grad_norm": 18.301847457885742,
+       "learning_rate": 2.620253164556962e-05,
+       "loss": 1.1858,
+       "step": 1000
+     },
+     {
+       "epoch": 0.9493670886075949,
+       "grad_norm": 19.28789710998535,
+       "learning_rate": 2.430379746835443e-05,
+       "loss": 1.0645,
+       "step": 1500
+     },
+     {
+       "epoch": 1.2658227848101267,
+       "grad_norm": 27.41464614868164,
+       "learning_rate": 2.240506329113924e-05,
+       "loss": 0.7401,
+       "step": 2000
+     },
+     {
+       "epoch": 1.5822784810126582,
+       "grad_norm": 22.102096557617188,
+       "learning_rate": 2.050632911392405e-05,
+       "loss": 0.7248,
+       "step": 2500
+     },
+     {
+       "epoch": 1.8987341772151898,
+       "grad_norm": 29.267433166503906,
+       "learning_rate": 1.860759493670886e-05,
+       "loss": 0.7217,
+       "step": 3000
+     },
+     {
+       "epoch": 2.2151898734177213,
+       "grad_norm": 14.63181209564209,
+       "learning_rate": 1.670886075949367e-05,
+       "loss": 0.4984,
+       "step": 3500
+     },
+     {
+       "epoch": 2.5316455696202533,
+       "grad_norm": 15.814390182495117,
+       "learning_rate": 1.4810126582278482e-05,
+       "loss": 0.4188,
+       "step": 4000
+     },
+     {
+       "epoch": 2.848101265822785,
+       "grad_norm": 14.612494468688965,
+       "learning_rate": 1.2911392405063291e-05,
+       "loss": 0.4127,
+       "step": 4500
+     },
+     {
+       "epoch": 3.1645569620253164,
+       "grad_norm": 39.51222229003906,
+       "learning_rate": 1.1012658227848103e-05,
+       "loss": 0.3182,
+       "step": 5000
+     },
+     {
+       "epoch": 3.481012658227848,
+       "grad_norm": 13.681534767150879,
+       "learning_rate": 9.113924050632912e-06,
+       "loss": 0.2268,
+       "step": 5500
+     },
+     {
+       "epoch": 3.7974683544303796,
+       "grad_norm": 11.261092185974121,
+       "learning_rate": 7.215189873417722e-06,
+       "loss": 0.2346,
+       "step": 6000
+     },
+     {
+       "epoch": 4.113924050632911,
+       "grad_norm": 19.033397674560547,
+       "learning_rate": 5.3164556962025316e-06,
+       "loss": 0.1939,
+       "step": 6500
+     },
+     {
+       "epoch": 4.430379746835443,
+       "grad_norm": 25.222740173339844,
+       "learning_rate": 3.4177215189873417e-06,
+       "loss": 0.1312,
+       "step": 7000
+     },
+     {
+       "epoch": 4.746835443037975,
+       "grad_norm": 19.743125915527344,
+       "learning_rate": 1.518987341772152e-06,
+       "loss": 0.1167,
+       "step": 7500
+     },
+     {
+       "epoch": 5.0,
+       "step": 7900,
+       "total_flos": 1.85781994039296e+16,
+       "train_loss": 0.5625110177148747,
+       "train_runtime": 892.8635,
+       "train_samples_per_second": 106.175,
+       "train_steps_per_second": 8.848
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 7900,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.85781994039296e+16,
+   "train_batch_size": 12,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b9f3a52d8ffe066ddc8a698d8d91df86a4c62a61449a6d1931eba6071252476
+ size 5304
vocab.txt ADDED
The diff for this file is too large to render. See raw diff