Tongjilibo committed
Commit e086ae4
1 Parent(s): e765c34

Upload 3 files

Files changed (3)
  1. bert4torch_config.json +16 -0
  2. convert.py +76 -0
  3. vocab.txt +0 -0
bert4torch_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "attention_probs_dropout_prob": 0.0,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 384,
+   "embedding_size": 128,
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "max_position_embeddings": 512,
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "type_vocab_size": 2,
+   "vocab_size": 21128,
+   "segment_vocab_size": 0
+ }
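The file above is a bert4torch-format config: "segment_vocab_size": 0 tells bert4torch not to expect segment/token-type inputs, and "embedding_size" (128) smaller than "hidden_size" (384) implies an ALBERT-style embedding-to-hidden projection. A minimal sketch of building the model from this config, assuming bert4torch's build_transformer_model helper and local file paths (both assumptions, not part of this commit):

    # Minimal sketch (assumed API and paths): build the model from this config with bert4torch
    from bert4torch.models import build_transformer_model

    config_path = 'bert4torch_config.json'   # the file added above
    checkpoint_path = 'pytorch_model.bin'    # produced by convert.py below

    # with_pool=False because this checkpoint has no pooler layer (see the notes in convert.py)
    model = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, with_pool=False)

With with_pool=False the model output is the last hidden state, so the CLS vector is taken as last_hidden_state[:, 0], as noted in convert.py.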
convert.py ADDED
@@ -0,0 +1,76 @@
+ # Convert the roberta-small pretrained model from TensorFlow to PyTorch
+ # Source project: https://github.com/ZhuiyiTechnology/pretrained-models
+ # roberta-small download: https://open.zhuiyi.ai/releases/nlp/models/zhuiyi/chinese_roberta_L-6_H-384_A-12.zip
+ # Note: Su Jianlin's (苏神) roberta-small/tiny checkpoints have no pooler layer, so unlike the bert-base conversion script, the pooler mapping is removed here
+ # When using the converted model, pass with_pool=False, otherwise warnings are raised; the CLS output is taken directly as last_hidden_state[:, 0]
+
+
+ import torch
+ import tensorflow as tf
+ import json
+
+
+ def convert(tf_dir):
+     tf_path = tf_dir + 'bert_model.ckpt'
+     torch_path = tf_dir + 'pytorch_model.bin'
+
+
+     with open(tf_dir + 'bert_config.json', 'r') as f:
+         config = json.load(f)
+     num_layers = config['num_hidden_layers']
+
+     torch_state_dict = {}
+
+     prefix = 'bert'
+     mapping = {  # a trailing '##' marks TF kernels that must be transposed ([in, out] in TF vs [out, in] for torch.nn.Linear)
+         'bert/embeddings/word_embeddings': f'{prefix}.embeddings.word_embeddings.weight',
+         'bert/embeddings/position_embeddings': f'{prefix}.embeddings.position_embeddings.weight',
+         'bert/embeddings/token_type_embeddings': f'{prefix}.embeddings.token_type_embeddings.weight',
+         'bert/embeddings/LayerNorm/beta': f'{prefix}.embeddings.LayerNorm.bias',
+         'bert/embeddings/LayerNorm/gamma': f'{prefix}.embeddings.LayerNorm.weight',
+         'cls/predictions/transform/dense/kernel': 'cls.predictions.transform.dense.weight##',
+         'cls/predictions/transform/dense/bias': 'cls.predictions.transform.dense.bias',
+         'cls/predictions/transform/LayerNorm/beta': 'cls.predictions.transform.LayerNorm.bias',
+         'cls/predictions/transform/LayerNorm/gamma': 'cls.predictions.transform.LayerNorm.weight',
+         'cls/predictions/output_bias': 'cls.predictions.bias'}
+
+     if ('embedding_size' in config) and (config['embedding_size'] != config['hidden_size']):  # ALBERT-style projection from embedding_size up to hidden_size
+         mapping.update({'bert/encoder/embedding_hidden_mapping_in/kernel': f'{prefix}.encoder.embedding_hidden_mapping_in.weight##',
+                         'bert/encoder/embedding_hidden_mapping_in/bias': f'{prefix}.encoder.embedding_hidden_mapping_in.bias'})
+
+     for i in range(num_layers):
+         prefix_i = f'{prefix}.encoder.layer.{i}.'
+         mapping.update({
+             f'bert/encoder/layer_{i}/attention/self/query/kernel': prefix_i + 'attention.self.query.weight##',  # transpose marker
+             f'bert/encoder/layer_{i}/attention/self/query/bias': prefix_i + 'attention.self.query.bias',
+             f'bert/encoder/layer_{i}/attention/self/key/kernel': prefix_i + 'attention.self.key.weight##',
+             f'bert/encoder/layer_{i}/attention/self/key/bias': prefix_i + 'attention.self.key.bias',
+             f'bert/encoder/layer_{i}/attention/self/value/kernel': prefix_i + 'attention.self.value.weight##',
+             f'bert/encoder/layer_{i}/attention/self/value/bias': prefix_i + 'attention.self.value.bias',
+             f'bert/encoder/layer_{i}/attention/output/dense/kernel': prefix_i + 'attention.output.dense.weight##',
+             f'bert/encoder/layer_{i}/attention/output/dense/bias': prefix_i + 'attention.output.dense.bias',
+             f'bert/encoder/layer_{i}/attention/output/LayerNorm/beta': prefix_i + 'attention.output.LayerNorm.bias',
+             f'bert/encoder/layer_{i}/attention/output/LayerNorm/gamma': prefix_i + 'attention.output.LayerNorm.weight',
+             f'bert/encoder/layer_{i}/intermediate/dense/kernel': prefix_i + 'intermediate.dense.weight##',
+             f'bert/encoder/layer_{i}/intermediate/dense/bias': prefix_i + 'intermediate.dense.bias',
+             f'bert/encoder/layer_{i}/output/dense/kernel': prefix_i + 'output.dense.weight##',
+             f'bert/encoder/layer_{i}/output/dense/bias': prefix_i + 'output.dense.bias',
+             f'bert/encoder/layer_{i}/output/LayerNorm/beta': prefix_i + 'output.LayerNorm.bias',
+             f'bert/encoder/layer_{i}/output/LayerNorm/gamma': prefix_i + 'output.LayerNorm.weight'
+         })
+
+
+     for key, value in mapping.items():
+         ts = tf.train.load_variable(tf_path, key)
+         if value.endswith('##'):
+             value = value.replace('##', '')
+             torch_state_dict[value] = torch.from_numpy(ts).T  # transpose marked kernels
+         else:
+             torch_state_dict[value] = torch.from_numpy(ts)
+     torch_state_dict['cls.predictions.decoder.weight'] = torch_state_dict[f'{prefix}.embeddings.word_embeddings.weight']  # tie the MLM decoder to the word embeddings
+     torch_state_dict['cls.predictions.decoder.bias'] = torch_state_dict['cls.predictions.bias']
+
+     torch.save(torch_state_dict, torch_path)
+
+
+ convert('E:/pretrain_ckpt/roberta/sushen@chinese_roberta_L-6_H-384_A-12/')
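A quick sanity check of the produced pytorch_model.bin, sketched as an assumed follow-up step (not part of this commit); the expected shapes follow directly from bert4torch_config.json above (vocab_size 21128, embedding_size 128, hidden_size 384) and from the transpose handling in convert.py:

    # Assumed verification sketch: inspect a few converted tensor shapes
    import torch

    state_dict = torch.load('E:/pretrain_ckpt/roberta/sushen@chinese_roberta_L-6_H-384_A-12/pytorch_model.bin', map_location='cpu')
    assert state_dict['bert.embeddings.word_embeddings.weight'].shape == (21128, 128)          # vocab_size x embedding_size
    assert state_dict['bert.encoder.embedding_hidden_mapping_in.weight'].shape == (384, 128)   # hidden_size x embedding_size (ALBERT-style projection)
    assert state_dict['bert.encoder.layer.0.attention.self.query.weight'].shape == (384, 384)  # TF kernel transposed to [out, in]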
vocab.txt ADDED
The diff for this file is too large to render.