import sys
import os
import time
from modelscope.models.audio.tts import SambertHifigan
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Resolve the fine-tuned voice directory from the command line.
# Usage: python <script> <model_dir> <transcript_file> <output_dir>
if len(sys.argv) < 2:
    sys.exit('usage: {} <model_dir> <transcript_file> <output_dir>'.format(sys.argv[0]))
model_dir = os.path.abspath(sys.argv[1])

# Absolute paths to the fine-tuned checkpoints/configs.  The directory
# layout presumably follows the ModelScope personal-TTS training output
# (tmp_am/, orig_model/, data/) — verify against the training script.
custom_infer_abs = {
    # Speaker/voice name expected by SambertHifigan (also used below
    # when dumping parameters).
    'voice_name': 'F7',
    # Acoustic model (Sambert) checkpoint and config.
    'am_ckpt': os.path.join(model_dir, 'tmp_am', 'ckpt'),
    'am_config': os.path.join(model_dir, 'tmp_am', 'config.yaml'),
    # Vocoder (HiFi-GAN) checkpoint and config from the 16 kHz base model.
    'voc_ckpt': os.path.join(model_dir, 'orig_model', 'basemodel_16k',
                             'hifigan', 'ckpt'),
    'voc_config': os.path.join(model_dir, 'orig_model', 'basemodel_16k',
                               'hifigan', 'config.yaml'),
    # Audio feature configuration and speaker-embedding file.
    'audio_config': os.path.join(model_dir, 'data', 'audio_config.yaml'),
    'se_file': os.path.join(model_dir, 'data', 'se', 'se.npy'),
}
# Location of the original base model shipped alongside the fine-tuned voice.
base_model_dir = os.path.join(model_dir, "orig_model")

# Build the Sambert-HiFiGAN model, overriding its checkpoints with the
# custom paths assembled above.
kwargs = {'custom_ckpt': custom_infer_abs}
model_id = SambertHifigan(base_model_dir, **kwargs)

# CPU-only text-to-speech pipeline driven by that model.
inference = pipeline(task=Tasks.text_to_speech, model=model_id, device='cpu')

# Remaining command-line arguments: transcript list file and wav output dir.
srctxt = sys.argv[2]
dstpath = sys.argv[3]
# Synthesize every transcript line.  Each line is assumed to be
# "<utt_id> <text>" (TODO confirm against the transcript format); the
# resulting wav is written to <dstpath>/<utt_id>.wav.
with open(srctxt, 'r', encoding='utf-8') as f:
    alltxt = f.readlines()
for txtline in alltxt:
    # maxsplit=1 keeps the whole sentence together; a plain split()
    # followed by [1] would synthesize only the first word of the text.
    parts = txtline.strip().split(maxsplit=1)
    if len(parts) < 2:
        # Skip blank or malformed lines instead of raising IndexError.
        continue
    utt_id, text = parts
    start = time.time()
    output = inference(input=text)
    print('spend time {}'.format(time.time() - start))
    wav = output[OutputKeys.OUTPUT_WAV]
    with open(os.path.join(dstpath, utt_id + ".wav"), 'wb') as f:
        f.write(wav)

def _print_state_dict(title, module):
    """Print *title*, then one 'name shape' row per parameter of *module*."""
    print(title)
    for name, tensor in module.state_dict().items():
        print("{:20s} {}".format(name, tensor.shape))

# Dump parameter names/shapes of the acoustic model and the vocoder
# (previously two copy-pasted loops).
_print_state_dict('am para', model_id.voices['F7'].am)
_print_state_dict('voc para', model_id.voices['F7'].voc_model)
