import os
import sys

import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

# If ./bert/chinese-roberta-wwm-ext-large/pytorch_model.bin exists, use the local
# copy; otherwise fall back to downloading hfl/chinese-roberta-wwm-ext-large from
# the Hugging Face Hub.
local_bert = os.path.exists("./bert/chinese-roberta-wwm-ext-large/pytorch_model.bin")

tokenizer = AutoTokenizer.from_pretrained(
    "./bert/chinese-roberta-wwm-ext-large"
    if local_bert
    else "hfl/chinese-roberta-wwm-ext-large"
)

# Cache of one model instance per device, filled lazily by get_bert_feature().
models = dict()
def get_bert_feature(text, word2ph, device=None):
    # On Apple Silicon, route "cpu" requests to the MPS backend when available.
    if (
        sys.platform == "darwin"
        and torch.backends.mps.is_available()
        and device == "cpu"
    ):
        device = "mps"
    if not device:
        device = "cuda"
    # Lazily create one model per device and cache it for reuse.
    if device not in models.keys():
        models[device] = AutoModelForMaskedLM.from_pretrained(
            "./bert/chinese-roberta-wwm-ext-large"
            if local_bert
            else "hfl/chinese-roberta-wwm-ext-large"
        ).to(device)
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt")
        for i in inputs:
            inputs[i] = inputs[i].to(device)
        res = models[device](**inputs, output_hidden_states=True)
        # hidden_states[-3:-2] is a one-element slice, so this selects the
        # third-to-last layer; torch.cat over a single-element list is a no-op.
        res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()

    # word2ph has one entry per tokenized position, including [CLS] and [SEP].
    assert len(word2ph) == len(text) + 2
    word2phone = word2ph
    phone_level_feature = []
    for i in range(len(word2phone)):
        # Repeat each character's feature once per phone it maps to.
        repeat_feature = res[i].repeat(word2phone[i], 1)
        phone_level_feature.append(repeat_feature)

    phone_level_feature = torch.cat(phone_level_feature, dim=0)

    return phone_level_feature.T
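
# A minimal usage sketch (illustrative only: the text and word2ph values below
# are assumed, and the first call may trigger a model download):
#
#     text = "你好"
#     # one phone count per position: [CLS], 你, 好, [SEP]
#     word2ph = [1, 2, 2, 1]
#     feature = get_bert_feature(text, word2ph, device="cuda")
#     print(feature.shape)  # torch.Size([1024, 6]); columns == sum(word2ph)
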
if __name__ == "__main__":
    word_level_feature = torch.rand(38, 1024)  # 38 words, a 1024-dim feature each
    word2phone = [
        1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1,
        2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1,
    ]
    # Total number of phone frames after expansion.
    total_frames = sum(word2phone)
    print(word_level_feature.shape)
    print(word2phone)
    phone_level_feature = []
    for i in range(len(word2phone)):
        print(word_level_feature[i].shape)

        # Repeat each word's feature word2phone[i] times.
        repeat_feature = word_level_feature[i].repeat(word2phone[i], 1)
        phone_level_feature.append(repeat_feature)

    phone_level_feature = torch.cat(phone_level_feature, dim=0)
    print(phone_level_feature.shape)  # torch.Size([65, 1024]), i.e. (sum(word2phone), 1024)
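
    # The loop above can also be written without Python-level iteration; this is
    # an equivalent sketch using torch.repeat_interleave, which repeats each row
    # of word_level_feature word2phone[i] times along dim 0:
    repeats = torch.tensor(word2phone)
    vectorized = torch.repeat_interleave(word_level_feature, repeats, dim=0)
    assert torch.equal(vectorized, phone_level_feature)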