Update log.txt
log.txt
ADDED
@@ -0,0 +1,141 @@
+INFO:transformers.configuration_utils:loading configuration file ../../Multilingual-MiniLM-L12-H384/config.json
+INFO:transformers.configuration_utils:Model config BertConfig {
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "type_vocab_size": 2,
+  "vocab_size": 250037
+}
+
+INFO:transformers.modeling_utils:loading weights file ../../Multilingual-MiniLM-L12-H384/pytorch_model.bin
+INFO:transformers.modeling_utils:Weights of BertModel not initialized from pretrained model: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias']
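Note: the block above is what transformers prints when a BERT model is instantiated from a local checkpoint directory. A minimal sketch that would produce equivalent output; the path mirrors the log, but the logging setup and the exact entry point are assumptions, not taken from the log itself:

    import logging

    from transformers import BertModel

    # Surface transformers' INFO-level messages, as captured in this log.
    logging.basicConfig(level=logging.INFO)

    # Loading from a directory reads config.json first, then pytorch_model.bin.
    # The pooler weights are reported as freshly initialized because the
    # MiniLM checkpoint does not ship them.
    model = BertModel.from_pretrained("../../Multilingual-MiniLM-L12-H384/")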
+INFO:transformers.configuration_utils:loading configuration file ../../Multilingual-MiniLM-L12-H384/config.json
+INFO:transformers.configuration_utils:Model config BertConfig {
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "type_vocab_size": 2,
+  "vocab_size": 250037
+}
+
+INFO:transformers.modeling_utils:loading weights file ../../Multilingual-MiniLM-L12-H384/pytorch_model.bin
+INFO:transformers.configuration_utils:loading configuration file ../../Multilingual-MiniLM-L12-H384/config.json
+INFO:transformers.configuration_utils:Model config BertConfig {
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "type_vocab_size": 2,
+  "vocab_size": 250037
+}
+
+INFO:transformers.modeling_utils:loading weights file ../../Multilingual-MiniLM-L12-H384/pytorch_model.bin
+INFO:transformers.modeling_utils:Weights of BertModel not initialized from pretrained model: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias']
+INFO:transformers.configuration_utils:loading configuration file ../../Multilingual-MiniLM-L12-H384/config.json
+INFO:transformers.configuration_utils:Model config BertConfig {
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "type_vocab_size": 2,
+  "vocab_size": 250037
+}
+
+INFO:transformers.modeling_tf_utils:loading weights file ../../Multilingual-MiniLM-L12-H384/pytorch_model.bin
+INFO:transformers.modeling_tf_pytorch_utils:Loading PyTorch weights from /home/patrick/hugging_face/models/Multilingual-MiniLM-L12-H384/pytorch_model.bin
+INFO:transformers.modeling_tf_pytorch_utils:PyTorch checkpoint contains 117,904,565 parameters
+INFO:transformers.modeling_tf_pytorch_utils:Loaded 117,505,920 parameters in the TF 2.0 model.
+INFO:transformers.modeling_tf_pytorch_utils:Weights or buffers not loaded from PyTorch model: {'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.bias', 'cls.predictions.transform.dense.bias'}
+INFO:transformers.configuration_utils:Configuration saved in ./config.json
+INFO:transformers.modeling_utils:Model weights saved in ./pytorch_model.bin
+INFO:transformers.configuration_utils:Configuration saved in ./config.json
+INFO:transformers.modeling_tf_utils:Model weights saved in ./tf_model.h5
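Note: the modeling_tf_* messages above correspond to re-loading the PyTorch checkpoint into a TF 2.0 model and saving both formats into the working directory. A sketch of that step, assuming the standard from_pt conversion path; only the paths come from the log:

    from transformers import BertModel, TFBertModel

    src = "../../Multilingual-MiniLM-L12-H384/"

    # Writes ./config.json and ./pytorch_model.bin, matching the save messages.
    pt_model = BertModel.from_pretrained(src)
    pt_model.save_pretrained("./")

    # from_pt=True converts pytorch_model.bin into TF 2.0 weights. The
    # cls.predictions.* tensors are skipped because a bare TFBertModel has no
    # masked-LM head; their size (147,840 + 768 + 250,037 = 398,645) exactly
    # accounts for the gap between 117,904,565 checkpoint parameters and the
    # 117,505,920 reported as loaded.
    tf_model = TFBertModel.from_pretrained(src, from_pt=True)
    tf_model.save_pretrained("./")  # writes ./config.json and ./tf_model.h5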
+INFO:transformers.tokenization_utils_base:Model name '../../MiniLM-L12-H384-uncased/' not found in model shortcut name list (xlm-roberta-base, xlm-roberta-large, xlm-roberta-large-finetuned-conll02-dutch, xlm-roberta-large-finetuned-conll02-spanish, xlm-roberta-large-finetuned-conll03-english, xlm-roberta-large-finetuned-conll03-german). Assuming '../../MiniLM-L12-H384-uncased/' is a path, a model identifier, or url to a directory containing tokenizer files.
+INFO:transformers.tokenization_utils_base:Didn't find file ../../MiniLM-L12-H384-uncased/sentencepiece.bpe.model. We won't load it.
+INFO:transformers.tokenization_utils_base:Didn't find file ../../MiniLM-L12-H384-uncased/added_tokens.json. We won't load it.
+INFO:transformers.tokenization_utils_base:Didn't find file ../../MiniLM-L12-H384-uncased/special_tokens_map.json. We won't load it.
+INFO:transformers.tokenization_utils_base:Didn't find file ../../MiniLM-L12-H384-uncased/tokenizer_config.json. We won't load it.
+INFO:transformers.configuration_utils:loading configuration file ../../Multilingual-MiniLM-L12-H384/config.json
+INFO:transformers.configuration_utils:Model config BertConfig {
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "type_vocab_size": 2,
+  "vocab_size": 250037
+}
+
+INFO:transformers.modeling_utils:loading weights file ../../Multilingual-MiniLM-L12-H384/pytorch_model.bin
+INFO:transformers.modeling_utils:Weights of BertModel not initialized from pretrained model: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias']
+INFO:transformers.configuration_utils:loading configuration file ../../Multilingual-MiniLM-L12-H384/config.json
+INFO:transformers.configuration_utils:Model config BertConfig {
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "type_vocab_size": 2,
+  "vocab_size": 250037
+}
+
+INFO:transformers.modeling_tf_utils:loading weights file ../../Multilingual-MiniLM-L12-H384/pytorch_model.bin
+INFO:transformers.modeling_tf_pytorch_utils:Loading PyTorch weights from /home/patrick/hugging_face/models/Multilingual-MiniLM-L12-H384/pytorch_model.bin
+INFO:transformers.modeling_tf_pytorch_utils:PyTorch checkpoint contains 117,904,565 parameters
+INFO:transformers.modeling_tf_pytorch_utils:Loaded 117,505,920 parameters in the TF 2.0 model.
+INFO:transformers.modeling_tf_pytorch_utils:Weights or buffers not loaded from PyTorch model: {'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.bias'}
+INFO:transformers.configuration_utils:Configuration saved in ./config.json
+INFO:transformers.modeling_utils:Model weights saved in ./pytorch_model.bin
+INFO:transformers.configuration_utils:Configuration saved in ./config.json
+INFO:transformers.modeling_tf_utils:Model weights saved in ./tf_model.h5
+INFO:transformers.tokenization_utils_base:Model name '../../Multilingual-MiniLM-L12-H384/sentencepiece.bpe.model' not found in model shortcut name list (xlm-roberta-base, xlm-roberta-large, xlm-roberta-large-finetuned-conll02-dutch, xlm-roberta-large-finetuned-conll02-spanish, xlm-roberta-large-finetuned-conll03-english, xlm-roberta-large-finetuned-conll03-german). Assuming '../../Multilingual-MiniLM-L12-H384/sentencepiece.bpe.model' is a path, a model identifier, or url to a directory containing tokenizer files.
+WARNING:transformers.tokenization_utils_base:Calling XLMRobertaTokenizer.from_pretrained() with the path to a single file or url is deprecated
+INFO:transformers.tokenization_utils_base:loading file ../../Multilingual-MiniLM-L12-H384/sentencepiece.bpe.model
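Note: the final WARNING is the deprecation notice for handing XLMRobertaTokenizer.from_pretrained() a single sentencepiece file; passing the directory that contains it avoids the warning. Both calls below are reconstructions from the messages above, not code taken from the log:

    from transformers import XLMRobertaTokenizer

    # Deprecated: a single-file path triggers the WARNING logged above.
    tokenizer = XLMRobertaTokenizer.from_pretrained(
        "../../Multilingual-MiniLM-L12-H384/sentencepiece.bpe.model"
    )

    # Preferred: point at the directory containing sentencepiece.bpe.model.
    tokenizer = XLMRobertaTokenizer.from_pretrained("../../Multilingual-MiniLM-L12-H384/")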