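"""Extract phone-level BERT features for Japanese text.

Runs the character-level deberta-v2-large-japanese-char-wwm model and
repeats each character's hidden state according to word2ph so the output
aligns with the phoneme sequence.
"""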
import sys

import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

from config import config
from text.japanese import text2sep_kata, text_normalize

LOCAL_PATH = "./bert/deberta-v2-large-japanese-char-wwm"

tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH)

# Cache one model instance per device so repeated calls do not reload weights.
models = dict()
def get_bert_feature(
    text,
    word2ph,
    device=config.bert_gen_config.device,
    assist_text=None,
    assist_text_weight=0.7,
    ignore_unknown=False,
):
text = "".join(text2sep_kata(text, ignore_unknown=ignore_unknown)[0]) | |
# text = text_normalize(text) | |
if assist_text: | |
assist_text = "".join(text2sep_kata(assist_text)[0]) | |
    # Prefer MPS over CPU on Apple Silicon; fall back from CUDA to CPU
    # when no GPU is available.
    if (
        sys.platform == "darwin"
        and torch.backends.mps.is_available()
        and device == "cpu"
    ):
        device = "mps"
    if not device:
        device = "cuda"
    if device == "cuda" and not torch.cuda.is_available():
        device = "cpu"
    # Load the model lazily, once per device.
    if device not in models:
        models[device] = AutoModelForMaskedLM.from_pretrained(LOCAL_PATH).to(device)
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt")
        for i in inputs:
            inputs[i] = inputs[i].to(device)
        res = models[device](**inputs, output_hidden_states=True)
        # Take the third-to-last hidden layer; the [-3:-2] slice keeps a
        # single layer, so the cat is effectively a no-op.
        res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()
        if assist_text:
            style_inputs = tokenizer(assist_text, return_tensors="pt")
            for i in style_inputs:
                style_inputs[i] = style_inputs[i].to(device)
            style_res = models[device](**style_inputs, output_hidden_states=True)
            style_res = torch.cat(style_res["hidden_states"][-3:-2], -1)[0].cpu()
            # Mean-pool the assist text over tokens to get a single style vector.
            style_res_mean = style_res.mean(0)
    # One word2ph entry per character, plus two for [CLS] and [SEP].
    assert len(word2ph) == len(text) + 2, text
    word2phone = word2ph
    phone_level_feature = []
    for i in range(len(word2phone)):
        if assist_text:
            # Blend each token's feature with the assist text's mean style
            # vector, weighted by assist_text_weight.
            repeat_feature = (
                res[i].repeat(word2phone[i], 1) * (1 - assist_text_weight)
                + style_res_mean.repeat(word2phone[i], 1) * assist_text_weight
            )
        else:
            # Repeat each token-level feature once per phone it maps to.
            repeat_feature = res[i].repeat(word2phone[i], 1)
        phone_level_feature.append(repeat_feature)

    phone_level_feature = torch.cat(phone_level_feature, dim=0)

    # (hidden_size, total_phones)
    return phone_level_feature.T
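

# A minimal usage sketch, not part of the original file; the sample names
# below are illustrative assumptions. word2ph needs one entry per character
# of the processed text plus two for [CLS]/[SEP], and the result has shape
# (hidden_size, sum(word2ph)).
if __name__ == "__main__":
    sample_text = "こんにちは"
    # Two phones per character here, padded with 1 for [CLS] and [SEP].
    sample_word2ph = [1] + [2] * len(sample_text) + [1]
    feature = get_bert_feature(sample_text, sample_word2ph, device="cpu")
    print(feature.shape)  # e.g. torch.Size([1024, 12])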