Md Mushfiqur Rahman committed on
Commit
10bc378
1 Parent(s): 0d8f6b5

Upload with huggingface_hub

README.md ADDED
@@ -0,0 +1,39 @@
+ ---
+ language:
+ - en
+ license: apache-2.0
+ ---
+
+ # BERT base model (cased)
+
+ Pretrained model on English-language data using a masked language modeling (MLM) objective.
+ It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in
+ [this repository](https://github.com/google-research/bert). This model is case-sensitive: it makes a difference
+ between english and English.
+
+ ## Model description
+
+ BERT is a transformers model pretrained on a large corpus of English data in a self-supervised fashion. This means
+ it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of
+ publicly available data), with an automatic process to generate inputs and labels from those texts. More precisely, it
+ was pretrained with two objectives:
+
+ - Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input, then runs
+ the entire masked sentence through the model and has to predict the masked words. This is different from traditional
+ recurrent neural networks (RNNs), which usually see the words one after the other, and from autoregressive models like
+ GPT, which internally mask the future tokens. It allows the model to learn a bidirectional representation of the
+ sentence (see the fill-mask sketch after this list).
+ - Next sentence prediction (NSP): the model concatenates two masked sentences as inputs during pretraining. Sometimes
+ they correspond to sentences that were next to each other in the original text, sometimes not. The model then has to
+ predict whether the two sentences followed each other or not.
+
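+ To make the MLM objective concrete, here is a minimal fill-mask sketch. It uses the upstream `bert-base-cased`
+ checkpoint (an assumption for illustration only, since the checkpoint in this repository ships a
+ token-classification head rather than the MLM head):
+
+ ```python
+ from transformers import pipeline
+
+ # Illustrative only: the upstream MLM checkpoint, not this fine-tuned model.
+ unmasker = pipeline("fill-mask", model="bert-base-cased")
+ # BERT predicts the token behind [MASK] using context from both directions.
+ print(unmasker("Hello I'm a [MASK] model."))
+ ```
+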
+ The pretrained model has been fine-tuned from `bert-base-cased` for one specific language and one specific task:
+ named-entity recognition (token classification with BIO tags for DATE, PER, ORG, and LOC entities) on the
+ MasakhaNER pcm (Nigerian Pidgin) data, as reflected in the `config.json` and `trainer_state.json` files below.
+
+ ### How to use
+
+ Here is how to use this model to get the features of a given text in PyTorch:
+
+ ```python
+ from transformers import BertTokenizer, BertModel
+ tokenizer = BertTokenizer.from_pretrained("mushfiqur11/<repo_name>")
+ model = BertModel.from_pretrained("mushfiqur11/<repo_name>")
+ ```
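+
+ Because `config.json` declares a `BertForTokenClassification` architecture, the fine-tuned NER head can also be run
+ directly. A minimal sketch, assuming `<repo_name>` is replaced with the actual repository name (the example sentence
+ is made up):
+
+ ```python
+ from transformers import pipeline
+
+ # Loads the token-classification head shipped with this checkpoint.
+ ner = pipeline("token-classification", model="mushfiqur11/<repo_name>", aggregation_strategy="simple")
+ # Returns entity spans labelled with the tags from config.json (DATE, PER, ORG, LOC).
+ print(ner("My name is Wolfgang and I live in Berlin."))
+ ```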
config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "_name_or_path": "bert-base-cased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "O",
+     "1": "B-DATE",
+     "2": "I-DATE",
+     "3": "B-PER",
+     "4": "I-PER",
+     "5": "B-ORG",
+     "6": "I-ORG",
+     "7": "B-LOC",
+     "8": "I-LOC"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-DATE": 1,
+     "B-LOC": 7,
+     "B-ORG": 5,
+     "B-PER": 3,
+     "I-DATE": 2,
+     "I-LOC": 8,
+     "I-ORG": 6,
+     "I-PER": 4,
+     "O": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.17.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 28996
+ }
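
The `id2label` map in this config is what turns the model's raw logits into BIO tags. A minimal decoding sketch, assuming `<repo_name>` is filled in (variable names here are illustrative, not part of the repository):

```python
import torch
from transformers import BertForTokenClassification, BertTokenizerFast

name = "mushfiqur11/<repo_name>"  # hypothetical path; replace with the real repo id
tokenizer = BertTokenizerFast.from_pretrained(name)
model = BertForTokenClassification.from_pretrained(name)

inputs = tokenizer("My name is Wolfgang and I live in Berlin.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, sequence_length, 9 labels)

# Pick the highest-scoring class per token, then map ids to tags via id2label.
label_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, label_id in zip(tokens, label_ids):
    print(token, model.config.id2label[label_id])
```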
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a51d792ad06f5991cf1d0e19ed3865185058d00fd39698f63ffc75a931fece4b
+ size 430992429
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
test_predictions.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "add_prefix_space": false, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-cased", "tokenizer_class": "BertTokenizer"}
trainer_state.json ADDED
@@ -0,0 +1,169 @@
+ {
+   "best_metric": 0.9280742459396751,
+   "best_model_checkpoint": "/scratch/mrahma45/pixel/finetuned_models/bert/bert-base-finetuned-masakhaner-pcm/checkpoint-1000",
+   "epoch": 29.850746268656717,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 2.99,
+       "eval_accuracy_score": 0.9857660455486542,
+       "eval_f1": 0.9040060468631896,
+       "eval_loss": 0.050759539008140564,
+       "eval_precision": 0.872992700729927,
+       "eval_recall": 0.9373040752351097,
+       "eval_runtime": 4.2853,
+       "eval_samples_per_second": 71.408,
+       "eval_steps_per_second": 9.101,
+       "step": 200
+     },
+     {
+       "epoch": 5.97,
+       "eval_accuracy_score": 0.9886128364389234,
+       "eval_f1": 0.9248826291079814,
+       "eval_loss": 0.058611851185560226,
+       "eval_precision": 0.9234375,
+       "eval_recall": 0.9263322884012539,
+       "eval_runtime": 4.2846,
+       "eval_samples_per_second": 71.419,
+       "eval_steps_per_second": 9.102,
+       "step": 400
+     },
+     {
+       "epoch": 7.46,
+       "learning_rate": 4.865771812080537e-05,
+       "loss": 0.1357,
+       "step": 500
+     },
+     {
+       "epoch": 8.96,
+       "eval_accuracy_score": 0.9886128364389234,
+       "eval_f1": 0.9205607476635514,
+       "eval_loss": 0.06370853632688522,
+       "eval_precision": 0.9148606811145511,
+       "eval_recall": 0.9263322884012539,
+       "eval_runtime": 4.2868,
+       "eval_samples_per_second": 71.382,
+       "eval_steps_per_second": 9.098,
+       "step": 600
+     },
+     {
+       "epoch": 11.94,
+       "eval_accuracy_score": 0.9877070393374742,
+       "eval_f1": 0.9252336448598131,
+       "eval_loss": 0.07289239019155502,
+       "eval_precision": 0.9195046439628483,
+       "eval_recall": 0.9310344827586207,
+       "eval_runtime": 4.2837,
+       "eval_samples_per_second": 71.433,
+       "eval_steps_per_second": 9.104,
+       "step": 800
+     },
+     {
+       "epoch": 14.93,
+       "learning_rate": 4.697986577181208e-05,
+       "loss": 0.0029,
+       "step": 1000
+     },
+     {
+       "epoch": 14.93,
+       "eval_accuracy_score": 0.9880952380952381,
+       "eval_f1": 0.9280742459396751,
+       "eval_loss": 0.07048221677541733,
+       "eval_precision": 0.916030534351145,
+       "eval_recall": 0.9404388714733543,
+       "eval_runtime": 4.2789,
+       "eval_samples_per_second": 71.514,
+       "eval_steps_per_second": 9.115,
+       "step": 1000
+     },
+     {
+       "epoch": 17.91,
+       "eval_accuracy_score": 0.9858954451345756,
+       "eval_f1": 0.9140201394268009,
+       "eval_loss": 0.08080413192510605,
+       "eval_precision": 0.9035222052067381,
+       "eval_recall": 0.9247648902821317,
+       "eval_runtime": 4.2826,
+       "eval_samples_per_second": 71.452,
+       "eval_steps_per_second": 9.107,
+       "step": 1200
+     },
+     {
+       "epoch": 20.9,
+       "eval_accuracy_score": 0.9879658385093167,
+       "eval_f1": 0.921278254091972,
+       "eval_loss": 0.0840262845158577,
+       "eval_precision": 0.9162790697674419,
+       "eval_recall": 0.9263322884012539,
+       "eval_runtime": 4.2834,
+       "eval_samples_per_second": 71.439,
+       "eval_steps_per_second": 9.105,
+       "step": 1400
+     },
+     {
+       "epoch": 22.39,
+       "learning_rate": 4.530201342281879e-05,
+       "loss": 0.0018,
+       "step": 1500
+     },
+     {
+       "epoch": 23.88,
+       "eval_accuracy_score": 0.9855072463768116,
+       "eval_f1": 0.9083969465648855,
+       "eval_loss": 0.09424283355474472,
+       "eval_precision": 0.8854166666666666,
+       "eval_recall": 0.932601880877743,
+       "eval_runtime": 4.2901,
+       "eval_samples_per_second": 71.328,
+       "eval_steps_per_second": 9.091,
+       "step": 1600
+     },
+     {
+       "epoch": 26.87,
+       "eval_accuracy_score": 0.9874482401656315,
+       "eval_f1": 0.9267912772585669,
+       "eval_loss": 0.08567387610673904,
+       "eval_precision": 0.9210526315789473,
+       "eval_recall": 0.932601880877743,
+       "eval_runtime": 4.2857,
+       "eval_samples_per_second": 71.4,
+       "eval_steps_per_second": 9.1,
+       "step": 1800
+     },
+     {
+       "epoch": 29.85,
+       "learning_rate": 4.36241610738255e-05,
+       "loss": 0.0019,
+       "step": 2000
+     },
+     {
+       "epoch": 29.85,
+       "eval_accuracy_score": 0.9856366459627329,
+       "eval_f1": 0.9179566563467493,
+       "eval_loss": 0.09690915793180466,
+       "eval_precision": 0.9067278287461774,
+       "eval_recall": 0.9294670846394985,
+       "eval_runtime": 4.282,
+       "eval_samples_per_second": 71.462,
+       "eval_steps_per_second": 9.108,
+       "step": 2000
+     },
+     {
+       "epoch": 29.85,
+       "step": 2000,
+       "total_flos": 8286244530923520.0,
+       "train_loss": 0.035582695484161375,
+       "train_runtime": 2228.4914,
+       "train_samples_per_second": 215.392,
+       "train_steps_per_second": 6.731
+     }
+   ],
+   "max_steps": 15000,
+   "num_train_epochs": 224,
+   "total_flos": 8286244530923520.0,
+   "trial_name": null,
+   "trial_params": null
+ }
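
Since `trainer_state.json` records the full evaluation history, the best checkpoint and the per-step F1 can be pulled out with a few lines of Python (a convenience sketch, assuming the file has been downloaded locally):

```python
import json

# Load the training log shipped with this repository.
with open("trainer_state.json") as f:
    state = json.load(f)

# best_metric is the eval_f1 reached at best_model_checkpoint (step 1000 here).
print("best F1:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Print F1 for every evaluation entry in the log history.
for entry in state["log_history"]:
    if "eval_f1" in entry:
        print(entry["step"], round(entry["eval_f1"], 4))
```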
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac01d85f765a077a924ac18ffd14c26b1d698a2fd76cec84313c4044a57f1637
+ size 3259
vocab.txt ADDED
The diff for this file is too large to render. See raw diff