Tongjilibo committed
Commit 6b449f3
Parent(s): 5ad53e1
Modified some configuration files
Browse files
- {CDial-GPT-LCCC-base → CDial-GPT_LCCC-base}/bert4torch_config.json +0 -0
- {CDial-GPT-LCCC-base → CDial-GPT_LCCC-base}/bert4torch_vocab.txt +0 -0
- {CDial-GPT-LCCC-large → CDial-GPT_LCCC-large}/bert4torch_config.json +0 -0
- {CDial-GPT-LCCC-large → CDial-GPT_LCCC-large}/bert4torch_vocab.txt +0 -0
- roformer_chinese_sim_char_base/README.md +4 -0
- roformer_chinese_sim_char_base/convert.py +69 -0
- roformer_chinese_sim_char_ft_base/README.md +4 -0
- roformer_chinese_sim_char_ft_base/convert.py +69 -0
- roformer_chinese_sim_char_ft_small/README.md +4 -0
- roformer_chinese_sim_char_ft_small/convert.py +69 -0
- roformer_chinese_sim_char_small/README.md +4 -0
- roformer_chinese_sim_char_small/convert.py +69 -0
- simbert-chinese-base/bert4torch_config.json +0 -22
- simbert_chinese_small/bert4torch_config.json +0 -17
- simbert_chinese_tiny/bert4torch_config.json +0 -17
{CDial-GPT-LCCC-base → CDial-GPT_LCCC-base}/bert4torch_config.json
RENAMED
File without changes

{CDial-GPT-LCCC-base → CDial-GPT_LCCC-base}/bert4torch_vocab.txt
RENAMED
File without changes

{CDial-GPT-LCCC-large → CDial-GPT_LCCC-large}/bert4torch_config.json
RENAMED
File without changes

{CDial-GPT-LCCC-large → CDial-GPT_LCCC-large}/bert4torch_vocab.txt
RENAMED
File without changes
roformer_chinese_sim_char_base/README.md
ADDED
@@ -0,0 +1,4 @@
+# Notes
+
+- You can directly download the [PyTorch weights](https://huggingface.co/junnyu/roformer_chinese_sim_char_base) already converted by a third-party user
+- Or download the [TF weights](https://github.com/ZhuiyiTechnology/roformer-sim) and convert them with the convert.py script
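For the first option, a minimal sketch of fetching the third-party converted PyTorch weights with huggingface_hub; the repo id is the one linked in the README above, while the local target directory is only an assumed example:

```python
# Minimal sketch: download the third-party converted PyTorch weights from the Hub.
# Assumes a recent huggingface_hub; local_dir is an arbitrary example path.
from huggingface_hub import snapshot_download

snapshot_download(repo_id='junnyu/roformer_chinese_sim_char_base',
                  local_dir='./roformer_chinese_sim_char_base')
```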
roformer_chinese_sim_char_base/convert.py
ADDED
@@ -0,0 +1,69 @@
+# Convert the roformer-sim (simbert v2) pretrained model from TensorFlow to PyTorch
+# Source project: https://github.com/ZhuiyiTechnology/roformer-sim
+# Main difference from simbert (v1): the position-embedding weights are not loaded (Su Jianlin's ckpt does not contain them either); RoPE position encoding is used after the model is loaded
+
+import torch
+import tensorflow as tf
+import json
+
+tf_dir = 'E:/pretrain_ckpt/simbert/sushen@chinese_roformer-sim-char_L-12_H-768_A-12/'
+tf_path = tf_dir + 'bert_model.ckpt'
+torch_path = 'E:/pretrain_ckpt/simbert/sushen@chinese_roformer-sim-char_L-12_H-768_A-12/pytorch_model.bin'
+
+with open(tf_dir + 'bert_config.json', 'r') as f:
+    config = json.load(f)
+    num_layers = config['num_hidden_layers']
+
+torch_state_dict = {}
+
+prefix = 'roformer'
+mapping = {
+    'bert/embeddings/word_embeddings': f'{prefix}.embeddings.word_embeddings.weight',
+    'bert/embeddings/token_type_embeddings': f'{prefix}.embeddings.token_type_embeddings.weight',
+    'bert/embeddings/LayerNorm/beta': f'{prefix}.embeddings.LayerNorm.bias',
+    'bert/embeddings/LayerNorm/gamma': f'{prefix}.embeddings.LayerNorm.weight',
+    'cls/predictions/transform/dense/kernel': 'cls.predictions.transform.dense.weight##',
+    'cls/predictions/transform/dense/bias': 'cls.predictions.transform.dense.bias',
+    'cls/predictions/transform/LayerNorm/beta': 'cls.predictions.transform.LayerNorm.bias',
+    'cls/predictions/transform/LayerNorm/gamma': 'cls.predictions.transform.LayerNorm.weight',
+    'cls/predictions/output_bias': 'cls.predictions.bias',
+    'bert/pooler/dense/kernel': f'{prefix}.pooler.dense.weight##',
+    'bert/pooler/dense/bias': f'{prefix}.pooler.dense.bias'}
+
+if ('embedding_size' in config) and (config['embedding_size'] != config['hidden_size']):
+    mapping.update({'bert/encoder/embedding_hidden_mapping_in/kernel': f'{prefix}.encoder.embedding_hidden_mapping_in.weight##',
+                    'bert/encoder/embedding_hidden_mapping_in/bias': f'{prefix}.encoder.embedding_hidden_mapping_in.bias'})
+
+for i in range(num_layers):
+    prefix_i = f'{prefix}.encoder.layer.%d.' % i
+    mapping.update({
+        f'bert/encoder/layer_{i}/attention/self/query/kernel': prefix_i + 'attention.self.query.weight##',  # '##' marks a weight that needs transposing
+        f'bert/encoder/layer_{i}/attention/self/query/bias': prefix_i + 'attention.self.query.bias',
+        f'bert/encoder/layer_{i}/attention/self/key/kernel': prefix_i + 'attention.self.key.weight##',
+        f'bert/encoder/layer_{i}/attention/self/key/bias': prefix_i + 'attention.self.key.bias',
+        f'bert/encoder/layer_{i}/attention/self/value/kernel': prefix_i + 'attention.self.value.weight##',
+        f'bert/encoder/layer_{i}/attention/self/value/bias': prefix_i + 'attention.self.value.bias',
+        f'bert/encoder/layer_{i}/attention/output/dense/kernel': prefix_i + 'attention.output.dense.weight##',
+        f'bert/encoder/layer_{i}/attention/output/dense/bias': prefix_i + 'attention.output.dense.bias',
+        f'bert/encoder/layer_{i}/attention/output/LayerNorm/beta': prefix_i + 'attention.output.LayerNorm.bias',
+        f'bert/encoder/layer_{i}/attention/output/LayerNorm/gamma': prefix_i + 'attention.output.LayerNorm.weight',
+        f'bert/encoder/layer_{i}/intermediate/dense/kernel': prefix_i + 'intermediate.dense.weight##',
+        f'bert/encoder/layer_{i}/intermediate/dense/bias': prefix_i + 'intermediate.dense.bias',
+        f'bert/encoder/layer_{i}/output/dense/kernel': prefix_i + 'output.dense.weight##',
+        f'bert/encoder/layer_{i}/output/dense/bias': prefix_i + 'output.dense.bias',
+        f'bert/encoder/layer_{i}/output/LayerNorm/beta': prefix_i + 'output.LayerNorm.bias',
+        f'bert/encoder/layer_{i}/output/LayerNorm/gamma': prefix_i + 'output.LayerNorm.weight'
+    })
+
+
+for key, value in mapping.items():
+    ts = tf.train.load_variable(tf_path, key)
+    if value.endswith('##'):
+        value = value.replace('##', '')
+        torch_state_dict[value] = torch.from_numpy(ts).T
+    else:
+        torch_state_dict[value] = torch.from_numpy(ts)
+torch_state_dict['cls.predictions.decoder.weight'] = torch_state_dict[f'{prefix}.embeddings.word_embeddings.weight']
+torch_state_dict['cls.predictions.decoder.bias'] = torch_state_dict['cls.predictions.bias']
+
+torch.save(torch_state_dict, torch_path)
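After running convert.py, a quick sanity check of the saved state dict helps catch mapping mistakes; this sketch only assumes the torch_path written above and prints the shapes of a few converted tensors:

```python
# Sanity-check sketch: inspect the checkpoint written by convert.py (torch_path above).
import torch

state_dict = torch.load('E:/pretrain_ckpt/simbert/sushen@chinese_roformer-sim-char_L-12_H-768_A-12/pytorch_model.bin',
                        map_location='cpu')
# Keys tagged with '##' in the mapping were transposed, so Linear weights come out as (out_features, in_features)
for name in ['roformer.embeddings.word_embeddings.weight',
             'roformer.encoder.layer.0.attention.self.query.weight',
             'cls.predictions.decoder.weight']:
    print(name, tuple(state_dict[name].shape))
```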
roformer_chinese_sim_char_ft_base/README.md
ADDED
@@ -0,0 +1,4 @@
+# Notes
+
+- You can directly download the [PyTorch weights](https://huggingface.co/junnyu/roformer_chinese_sim_char_ft_base) already converted by a third-party user
+- Or download the [TF weights](https://github.com/ZhuiyiTechnology/roformer-sim) and convert them with the convert.py script
roformer_chinese_sim_char_ft_base/convert.py
ADDED
@@ -0,0 +1,69 @@
+# Convert the roformer-sim (simbert v2) pretrained model from TensorFlow to PyTorch
+# Source project: https://github.com/ZhuiyiTechnology/roformer-sim
+# Main difference from simbert (v1): the position-embedding weights are not loaded (Su Jianlin's ckpt does not contain them either); RoPE position encoding is used after the model is loaded
+
+import torch
+import tensorflow as tf
+import json
+
+tf_dir = 'E:/pretrain_ckpt/simbert/sushen@chinese_roformer-sim-char_L-12_H-768_A-12/'
+tf_path = tf_dir + 'bert_model.ckpt'
+torch_path = 'E:/pretrain_ckpt/simbert/sushen@chinese_roformer-sim-char_L-12_H-768_A-12/pytorch_model.bin'
+
+with open(tf_dir + 'bert_config.json', 'r') as f:
+    config = json.load(f)
+    num_layers = config['num_hidden_layers']
+
+torch_state_dict = {}
+
+prefix = 'roformer'
+mapping = {
+    'bert/embeddings/word_embeddings': f'{prefix}.embeddings.word_embeddings.weight',
+    'bert/embeddings/token_type_embeddings': f'{prefix}.embeddings.token_type_embeddings.weight',
+    'bert/embeddings/LayerNorm/beta': f'{prefix}.embeddings.LayerNorm.bias',
+    'bert/embeddings/LayerNorm/gamma': f'{prefix}.embeddings.LayerNorm.weight',
+    'cls/predictions/transform/dense/kernel': 'cls.predictions.transform.dense.weight##',
+    'cls/predictions/transform/dense/bias': 'cls.predictions.transform.dense.bias',
+    'cls/predictions/transform/LayerNorm/beta': 'cls.predictions.transform.LayerNorm.bias',
+    'cls/predictions/transform/LayerNorm/gamma': 'cls.predictions.transform.LayerNorm.weight',
+    'cls/predictions/output_bias': 'cls.predictions.bias',
+    'bert/pooler/dense/kernel': f'{prefix}.pooler.dense.weight##',
+    'bert/pooler/dense/bias': f'{prefix}.pooler.dense.bias'}
+
+if ('embedding_size' in config) and (config['embedding_size'] != config['hidden_size']):
+    mapping.update({'bert/encoder/embedding_hidden_mapping_in/kernel': f'{prefix}.encoder.embedding_hidden_mapping_in.weight##',
+                    'bert/encoder/embedding_hidden_mapping_in/bias': f'{prefix}.encoder.embedding_hidden_mapping_in.bias'})
+
+for i in range(num_layers):
+    prefix_i = f'{prefix}.encoder.layer.%d.' % i
+    mapping.update({
+        f'bert/encoder/layer_{i}/attention/self/query/kernel': prefix_i + 'attention.self.query.weight##',  # '##' marks a weight that needs transposing
+        f'bert/encoder/layer_{i}/attention/self/query/bias': prefix_i + 'attention.self.query.bias',
+        f'bert/encoder/layer_{i}/attention/self/key/kernel': prefix_i + 'attention.self.key.weight##',
+        f'bert/encoder/layer_{i}/attention/self/key/bias': prefix_i + 'attention.self.key.bias',
+        f'bert/encoder/layer_{i}/attention/self/value/kernel': prefix_i + 'attention.self.value.weight##',
+        f'bert/encoder/layer_{i}/attention/self/value/bias': prefix_i + 'attention.self.value.bias',
+        f'bert/encoder/layer_{i}/attention/output/dense/kernel': prefix_i + 'attention.output.dense.weight##',
+        f'bert/encoder/layer_{i}/attention/output/dense/bias': prefix_i + 'attention.output.dense.bias',
+        f'bert/encoder/layer_{i}/attention/output/LayerNorm/beta': prefix_i + 'attention.output.LayerNorm.bias',
+        f'bert/encoder/layer_{i}/attention/output/LayerNorm/gamma': prefix_i + 'attention.output.LayerNorm.weight',
+        f'bert/encoder/layer_{i}/intermediate/dense/kernel': prefix_i + 'intermediate.dense.weight##',
+        f'bert/encoder/layer_{i}/intermediate/dense/bias': prefix_i + 'intermediate.dense.bias',
+        f'bert/encoder/layer_{i}/output/dense/kernel': prefix_i + 'output.dense.weight##',
+        f'bert/encoder/layer_{i}/output/dense/bias': prefix_i + 'output.dense.bias',
+        f'bert/encoder/layer_{i}/output/LayerNorm/beta': prefix_i + 'output.LayerNorm.bias',
+        f'bert/encoder/layer_{i}/output/LayerNorm/gamma': prefix_i + 'output.LayerNorm.weight'
+    })
+
+
+for key, value in mapping.items():
+    ts = tf.train.load_variable(tf_path, key)
+    if value.endswith('##'):
+        value = value.replace('##', '')
+        torch_state_dict[value] = torch.from_numpy(ts).T
+    else:
+        torch_state_dict[value] = torch.from_numpy(ts)
+torch_state_dict['cls.predictions.decoder.weight'] = torch_state_dict[f'{prefix}.embeddings.word_embeddings.weight']
+torch_state_dict['cls.predictions.decoder.bias'] = torch_state_dict['cls.predictions.bias']
+
+torch.save(torch_state_dict, torch_path)
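As a usage note, the converted checkpoint is meant to be loaded through bert4torch; the sketch below is a hedged example, where the config path, checkpoint path and the model='roformer' argument are assumptions about the bert4torch build_transformer_model API rather than something fixed by this commit:

```python
# Hedged sketch: build a RoFormer model from the converted weights with bert4torch.
# All paths are assumed examples; 'roformer' selects the RoPE-based encoder mentioned in convert.py.
from bert4torch.models import build_transformer_model

model = build_transformer_model(config_path='./roformer_chinese_sim_char_ft_base/bert4torch_config.json',
                                checkpoint_path='./pytorch_model.bin',
                                model='roformer')
model.eval()
```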
roformer_chinese_sim_char_ft_small/README.md
ADDED
@@ -0,0 +1,4 @@
+# Notes
+
+- You can directly download the [PyTorch weights](https://huggingface.co/junnyu/roformer_chinese_sim_char_ft_small) already converted by a third-party user
+- Or download the [TF weights](https://github.com/ZhuiyiTechnology/roformer-sim) and convert them with the convert.py script
roformer_chinese_sim_char_ft_small/convert.py
ADDED
@@ -0,0 +1,69 @@
+# Convert the roformer-sim (simbert v2) pretrained model from TensorFlow to PyTorch
+# Source project: https://github.com/ZhuiyiTechnology/roformer-sim
+# Main difference from simbert (v1): the position-embedding weights are not loaded (Su Jianlin's ckpt does not contain them either); RoPE position encoding is used after the model is loaded
+
+import torch
+import tensorflow as tf
+import json
+
+tf_dir = 'E:/pretrain_ckpt/simbert/sushen@chinese_roformer-sim-char_L-12_H-768_A-12/'
+tf_path = tf_dir + 'bert_model.ckpt'
+torch_path = 'E:/pretrain_ckpt/simbert/sushen@chinese_roformer-sim-char_L-12_H-768_A-12/pytorch_model.bin'
+
+with open(tf_dir + 'bert_config.json', 'r') as f:
+    config = json.load(f)
+    num_layers = config['num_hidden_layers']
+
+torch_state_dict = {}
+
+prefix = 'roformer'
+mapping = {
+    'bert/embeddings/word_embeddings': f'{prefix}.embeddings.word_embeddings.weight',
+    'bert/embeddings/token_type_embeddings': f'{prefix}.embeddings.token_type_embeddings.weight',
+    'bert/embeddings/LayerNorm/beta': f'{prefix}.embeddings.LayerNorm.bias',
+    'bert/embeddings/LayerNorm/gamma': f'{prefix}.embeddings.LayerNorm.weight',
+    'cls/predictions/transform/dense/kernel': 'cls.predictions.transform.dense.weight##',
+    'cls/predictions/transform/dense/bias': 'cls.predictions.transform.dense.bias',
+    'cls/predictions/transform/LayerNorm/beta': 'cls.predictions.transform.LayerNorm.bias',
+    'cls/predictions/transform/LayerNorm/gamma': 'cls.predictions.transform.LayerNorm.weight',
+    'cls/predictions/output_bias': 'cls.predictions.bias',
+    'bert/pooler/dense/kernel': f'{prefix}.pooler.dense.weight##',
+    'bert/pooler/dense/bias': f'{prefix}.pooler.dense.bias'}
+
+if ('embedding_size' in config) and (config['embedding_size'] != config['hidden_size']):
+    mapping.update({'bert/encoder/embedding_hidden_mapping_in/kernel': f'{prefix}.encoder.embedding_hidden_mapping_in.weight##',
+                    'bert/encoder/embedding_hidden_mapping_in/bias': f'{prefix}.encoder.embedding_hidden_mapping_in.bias'})
+
+for i in range(num_layers):
+    prefix_i = f'{prefix}.encoder.layer.%d.' % i
+    mapping.update({
+        f'bert/encoder/layer_{i}/attention/self/query/kernel': prefix_i + 'attention.self.query.weight##',  # '##' marks a weight that needs transposing
+        f'bert/encoder/layer_{i}/attention/self/query/bias': prefix_i + 'attention.self.query.bias',
+        f'bert/encoder/layer_{i}/attention/self/key/kernel': prefix_i + 'attention.self.key.weight##',
+        f'bert/encoder/layer_{i}/attention/self/key/bias': prefix_i + 'attention.self.key.bias',
+        f'bert/encoder/layer_{i}/attention/self/value/kernel': prefix_i + 'attention.self.value.weight##',
+        f'bert/encoder/layer_{i}/attention/self/value/bias': prefix_i + 'attention.self.value.bias',
+        f'bert/encoder/layer_{i}/attention/output/dense/kernel': prefix_i + 'attention.output.dense.weight##',
+        f'bert/encoder/layer_{i}/attention/output/dense/bias': prefix_i + 'attention.output.dense.bias',
+        f'bert/encoder/layer_{i}/attention/output/LayerNorm/beta': prefix_i + 'attention.output.LayerNorm.bias',
+        f'bert/encoder/layer_{i}/attention/output/LayerNorm/gamma': prefix_i + 'attention.output.LayerNorm.weight',
+        f'bert/encoder/layer_{i}/intermediate/dense/kernel': prefix_i + 'intermediate.dense.weight##',
+        f'bert/encoder/layer_{i}/intermediate/dense/bias': prefix_i + 'intermediate.dense.bias',
+        f'bert/encoder/layer_{i}/output/dense/kernel': prefix_i + 'output.dense.weight##',
+        f'bert/encoder/layer_{i}/output/dense/bias': prefix_i + 'output.dense.bias',
+        f'bert/encoder/layer_{i}/output/LayerNorm/beta': prefix_i + 'output.LayerNorm.bias',
+        f'bert/encoder/layer_{i}/output/LayerNorm/gamma': prefix_i + 'output.LayerNorm.weight'
+    })
+
+
+for key, value in mapping.items():
+    ts = tf.train.load_variable(tf_path, key)
+    if value.endswith('##'):
+        value = value.replace('##', '')
+        torch_state_dict[value] = torch.from_numpy(ts).T
+    else:
+        torch_state_dict[value] = torch.from_numpy(ts)
+torch_state_dict['cls.predictions.decoder.weight'] = torch_state_dict[f'{prefix}.embeddings.word_embeddings.weight']
+torch_state_dict['cls.predictions.decoder.bias'] = torch_state_dict['cls.predictions.bias']
+
+torch.save(torch_state_dict, torch_path)
roformer_chinese_sim_char_small/README.md
ADDED
@@ -0,0 +1,4 @@
+# Notes
+
+- You can directly download the [PyTorch weights](https://huggingface.co/junnyu/roformer_chinese_sim_char_small) already converted by a third-party user
+- Or download the [TF weights](https://github.com/ZhuiyiTechnology/roformer-sim) and convert them with the convert.py script
roformer_chinese_sim_char_small/convert.py
ADDED
@@ -0,0 +1,69 @@
+# Convert the roformer-sim (simbert v2) pretrained model from TensorFlow to PyTorch
+# Source project: https://github.com/ZhuiyiTechnology/roformer-sim
+# Main difference from simbert (v1): the position-embedding weights are not loaded (Su Jianlin's ckpt does not contain them either); RoPE position encoding is used after the model is loaded
+
+import torch
+import tensorflow as tf
+import json
+
+tf_dir = 'E:/pretrain_ckpt/simbert/sushen@chinese_roformer-sim-char_L-12_H-768_A-12/'
+tf_path = tf_dir + 'bert_model.ckpt'
+torch_path = 'E:/pretrain_ckpt/simbert/sushen@chinese_roformer-sim-char_L-12_H-768_A-12/pytorch_model.bin'
+
+with open(tf_dir + 'bert_config.json', 'r') as f:
+    config = json.load(f)
+    num_layers = config['num_hidden_layers']
+
+torch_state_dict = {}
+
+prefix = 'roformer'
+mapping = {
+    'bert/embeddings/word_embeddings': f'{prefix}.embeddings.word_embeddings.weight',
+    'bert/embeddings/token_type_embeddings': f'{prefix}.embeddings.token_type_embeddings.weight',
+    'bert/embeddings/LayerNorm/beta': f'{prefix}.embeddings.LayerNorm.bias',
+    'bert/embeddings/LayerNorm/gamma': f'{prefix}.embeddings.LayerNorm.weight',
+    'cls/predictions/transform/dense/kernel': 'cls.predictions.transform.dense.weight##',
+    'cls/predictions/transform/dense/bias': 'cls.predictions.transform.dense.bias',
+    'cls/predictions/transform/LayerNorm/beta': 'cls.predictions.transform.LayerNorm.bias',
+    'cls/predictions/transform/LayerNorm/gamma': 'cls.predictions.transform.LayerNorm.weight',
+    'cls/predictions/output_bias': 'cls.predictions.bias',
+    'bert/pooler/dense/kernel': f'{prefix}.pooler.dense.weight##',
+    'bert/pooler/dense/bias': f'{prefix}.pooler.dense.bias'}
+
+if ('embedding_size' in config) and (config['embedding_size'] != config['hidden_size']):
+    mapping.update({'bert/encoder/embedding_hidden_mapping_in/kernel': f'{prefix}.encoder.embedding_hidden_mapping_in.weight##',
+                    'bert/encoder/embedding_hidden_mapping_in/bias': f'{prefix}.encoder.embedding_hidden_mapping_in.bias'})
+
+for i in range(num_layers):
+    prefix_i = f'{prefix}.encoder.layer.%d.' % i
+    mapping.update({
+        f'bert/encoder/layer_{i}/attention/self/query/kernel': prefix_i + 'attention.self.query.weight##',  # '##' marks a weight that needs transposing
+        f'bert/encoder/layer_{i}/attention/self/query/bias': prefix_i + 'attention.self.query.bias',
+        f'bert/encoder/layer_{i}/attention/self/key/kernel': prefix_i + 'attention.self.key.weight##',
+        f'bert/encoder/layer_{i}/attention/self/key/bias': prefix_i + 'attention.self.key.bias',
+        f'bert/encoder/layer_{i}/attention/self/value/kernel': prefix_i + 'attention.self.value.weight##',
+        f'bert/encoder/layer_{i}/attention/self/value/bias': prefix_i + 'attention.self.value.bias',
+        f'bert/encoder/layer_{i}/attention/output/dense/kernel': prefix_i + 'attention.output.dense.weight##',
+        f'bert/encoder/layer_{i}/attention/output/dense/bias': prefix_i + 'attention.output.dense.bias',
+        f'bert/encoder/layer_{i}/attention/output/LayerNorm/beta': prefix_i + 'attention.output.LayerNorm.bias',
+        f'bert/encoder/layer_{i}/attention/output/LayerNorm/gamma': prefix_i + 'attention.output.LayerNorm.weight',
+        f'bert/encoder/layer_{i}/intermediate/dense/kernel': prefix_i + 'intermediate.dense.weight##',
+        f'bert/encoder/layer_{i}/intermediate/dense/bias': prefix_i + 'intermediate.dense.bias',
+        f'bert/encoder/layer_{i}/output/dense/kernel': prefix_i + 'output.dense.weight##',
+        f'bert/encoder/layer_{i}/output/dense/bias': prefix_i + 'output.dense.bias',
+        f'bert/encoder/layer_{i}/output/LayerNorm/beta': prefix_i + 'output.LayerNorm.bias',
+        f'bert/encoder/layer_{i}/output/LayerNorm/gamma': prefix_i + 'output.LayerNorm.weight'
+    })
+
+
+for key, value in mapping.items():
+    ts = tf.train.load_variable(tf_path, key)
+    if value.endswith('##'):
+        value = value.replace('##', '')
+        torch_state_dict[value] = torch.from_numpy(ts).T
+    else:
+        torch_state_dict[value] = torch.from_numpy(ts)
+torch_state_dict['cls.predictions.decoder.weight'] = torch_state_dict[f'{prefix}.embeddings.word_embeddings.weight']
+torch_state_dict['cls.predictions.decoder.bias'] = torch_state_dict['cls.predictions.bias']
+
+torch.save(torch_state_dict, torch_path)
simbert-chinese-base/bert4torch_config.json
DELETED
@@ -1,22 +0,0 @@
-{
-  "attention_probs_dropout_prob": 0.1,
-  "directionality": "bidi",
-  "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
-  "initializer_range": 0.02,
-  "intermediate_size": 3072,
-  "max_position_embeddings": 512,
-  "model_type": "bert",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 12,
-  "pooler_fc_size": 768,
-  "pooler_num_attention_heads": 12,
-  "pooler_num_fc_layers": 3,
-  "pooler_size_per_head": 128,
-  "pooler_type": "first_token_transform",
-  "type_vocab_size": 2,
-  "vocab_size": 13685,
-  "with_pool": "linear",
-  "pool_strategy": "pooler"
-}
simbert_chinese_small/bert4torch_config.json
DELETED
@@ -1,17 +0,0 @@
-{
-  "attention_probs_dropout_prob": 0.0,
-  "directionality": "bidi",
-  "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.0,
-  "hidden_size": 384,
-  "embedding_size": 128,
-  "initializer_range": 0.02,
-  "intermediate_size": 1536,
-  "max_position_embeddings": 512,
-  "num_attention_heads": 12,
-  "num_hidden_layers": 6,
-  "type_vocab_size": 2,
-  "vocab_size": 13685,
-  "with_pool": "linear",
-  "pool_strategy": "pooler"
-}
simbert_chinese_tiny/bert4torch_config.json
DELETED
@@ -1,17 +0,0 @@
-{
-  "attention_probs_dropout_prob": 0.0,
-  "directionality": "bidi",
-  "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.0,
-  "hidden_size": 312,
-  "embedding_size": 128,
-  "initializer_range": 0.02,
-  "intermediate_size": 1248,
-  "max_position_embeddings": 512,
-  "num_attention_heads": 12,
-  "num_hidden_layers": 4,
-  "type_vocab_size": 2,
-  "vocab_size": 13685,
-  "with_pool": "linear",
-  "pool_strategy": "pooler"
-}