import sys
import os
import time
import tensorflow as tf
import numpy as np
from modelscope.models.audio.tts import SambertHifigan
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# model_dir = os.path.abspath("./pretrain_yangmi")
model_dir = os.path.abspath(sys.argv[1])

# Absolute paths of the fine-tuned acoustic model / vocoder checkpoints and
# configs for the custom voice 'F7', passed to SambertHifigan below.
custom_infer_abs = {
    'voice_name': 'F7',
    'am_ckpt': os.path.join(model_dir, 'tmp_am', 'ckpt'),
    'am_config': os.path.join(model_dir, 'tmp_am', 'config.yaml'),
    'voc_ckpt': os.path.join(model_dir, 'orig_model', 'basemodel_16k',
                             'hifigan', 'ckpt'),
    'voc_config': os.path.join(model_dir, 'orig_model', 'basemodel_16k',
                               'hifigan', 'config.yaml'),
    'audio_config': os.path.join(model_dir, 'data', 'audio_config.yaml'),
    'se_file': os.path.join(model_dir, 'data', 'se', 'se.npy'),
}
kwargs = {'custom_ckpt': custom_infer_abs}

# Build the Sambert + HiFiGAN TTS model from the base checkpoints under
# 'orig_model', overriding components with the custom fine-tuned files
# listed in custom_infer_abs (via the 'custom_ckpt' kwarg).
model_id = SambertHifigan(os.path.join(model_dir, "orig_model"), **kwargs)

# CPU-only text-to-speech pipeline wrapping the model instance above.
inference = pipeline(task=Tasks.text_to_speech, model=model_id, device='cpu')
srctxt = sys.argv[2]   # input list file: one "<utt_id> <text>" per line
dstpath = sys.argv[3]  # output directory for <utt_id>.wav files

with open(srctxt, 'r') as f:
    alltxt = f.readlines()
for txtline in alltxt:
    parts = txtline.strip().split()
    # Guard against blank or malformed lines (e.g. a trailing newline at
    # EOF) which would otherwise raise IndexError on parts[1].
    if len(parts) < 2:
        continue
    start = time.time()
    # parts[0] is the utterance id, parts[1] the text to synthesize.
    # NOTE(review): only the first whitespace-separated token of the text is
    # used — assumes the text itself contains no spaces; confirm the list
    # file format.
    output = inference(input=parts[1])
    print('spend time {}'.format(time.time()-start))
    wav = output[OutputKeys.OUTPUT_WAV]
    # Use a distinct handle name so the list-file handle is not shadowed.
    with open(os.path.join(dstpath, parts[0] + ".wav"), 'wb') as wav_f:
        wav_f.write(wav)
models_path = sys.argv[4]


def _load_tf(name):
    """Load one exported Keras sub-model from <models_path>/tfmodel/."""
    return tf.keras.models.load_model(models_path + '/tfmodel/' + name)


enc_model = _load_tf('txtenc')         # text (linguistic) encoder
emot_model = _load_tf('emot')          # emotion embedding
varadp_model1 = _load_tf('varadp')     # variance-adaptor front end
duration_model = _load_tf('duration')  # autoregressive duration predictor
LR_model = _load_tf('LR')              # length regulator
dursin_model = _load_tf('dursin')
mem_model = _load_tf('mem')
hdec_model = _load_tf('attenH')        # decoder attention memory projections
get_mask_model = _load_tf('getmask')
dec1_model = _load_tf('meldec1')
dec2_model = _load_tf('meldec2')
post_model = _load_tf('post')
post0_model = _load_tf('post0')
post1_model = _load_tf('post1')
post2_model = _load_tf('post2')
post3_model = _load_tf('post3')
dnorm_model = _load_tf('dnorm')
# Twelve per-layer PNCA attention sub-models used by the second decoder stage.
dec2_pncas = [_load_tf('dec2pnca_{}'.format(layi)) for layi in range(12)]

def tf_tts_infer(am_input):
    """Run the text encoder on the linguistic features of one AM input.

    am_input[0] is expected to be a torch tensor of linguistic features;
    it is moved to host memory and re-wrapped as a TF tensor.
    """
    ling_feats = tf.convert_to_tensor(am_input[0].cpu().numpy())
    return enc_model(ling_feats, training=False)

def tf_variance_adaptor(am_input, txt_hid):
    """Predict durations and build the length-regulated decoder memory.

    am_input: per-utterance AM inputs; [1] is the emotion tensor and [2]
        the speaker embedding (both torch tensors — moved to TF here).
    txt_hid: text-encoder output from tf_tts_infer.
    Returns (memory, duration_predictions, LR_txt_len).
    """
    inputs_emotion = am_input[1].cpu().numpy()
    inputs_emotion = tf.convert_to_tensor(inputs_emotion)
    emo_hid = emot_model((inputs_emotion),training=False)
    print('emot hid {}'.format(emo_hid))
    spk_hid = tf.convert_to_tensor(am_input[2].cpu().numpy())
    print('spk hid  {}'.format(spk_hid))

    # Fuse text/emotion/speaker hidden states; also produces the
    # conditioning sequence for the duration predictor.
    txt_emb_aug,input_pred_con = varadp_model1((txt_hid,emo_hid,spk_hid),training=False)

    # Autoregressive duration prediction: a 2-layer LSTM (128 units each,
    # judging by the state shapes) is unrolled one timestep at a time,
    # feeding back its own output and hidden/cell states.
    cond_len = tf.shape(input_pred_con).numpy()[1]
    pre_x = tf.zeros([1,1,1])
    pre_h0 = tf.zeros([1,128])
    pre_c0 = tf.zeros([1,128])
    pre_h1 = tf.zeros([1,128])
    pre_c1 = tf.zeros([1,128])
    dura_output = []
    for il in range(cond_len):
        out_x, h0,c0,h1,c1,out_logx=duration_model((pre_x,input_pred_con[:,il:il+1,:],pre_h0,pre_c0,pre_h1,pre_c1),training=False)
        pre_x = out_x
        pre_h0 = h0
        pre_c0 = c0
        pre_h1 = h1
        pre_c1 = c1
        dura_output.append(out_logx)
    duration_predictions = tf.concat(dura_output,axis=1)

    # Length-regulate the augmented text embedding to frame rate.
    LR_txt_out,LR_txt_len = LR_model((txt_emb_aug,duration_predictions),training=False)

    # Add duration-derived (sinusoidal?) conditioning to the expanded text.
    LR_txt_out = dursin_model((duration_predictions,LR_txt_out),training=False)

    # Expand emotion and speaker streams with the same durations.
    LR_emo_out,_ = LR_model((emo_hid,duration_predictions),training=False)
    LR_spk_out,_ = LR_model((spk_hid,duration_predictions),training=False)

    # Combine the three expanded streams into the decoder memory.
    memout = mem_model((LR_txt_out,LR_spk_out,LR_emo_out),training=False)
    return memout,duration_predictions,LR_txt_len

def tf_mel_dec(memory,dur_pred):
    """Autoregressively decode mel features from the length-regulated memory.

    memory: decoder memory from tf_variance_adaptor, shape (1, T, D).
    dur_pred: per-token duration predictions (used only for mask sizing).
    Returns the concatenated decoder outputs, shape (1, T, 82*k) before the
    caller reshapes them to 82-dim frames.
    """
    # Project memory into per-layer attention keys/values for the decoder.
    hdec_out1,hdec_out2 = hdec_model((memory),training=False)

    # NOTE(review): /3 suggests a frame-reduction factor of 3; +0.5 rounds
    # to nearest — confirm against the AM config.
    dur_maxlen = int(tf.math.reduce_max(dur_pred)/3+0.5)
    mem_len = tf.shape(memory)[1]
    x_mask,h_mask = get_mask_model((tf.convert_to_tensor([[mem_len]]),tf.convert_to_tensor([[dur_maxlen]]),tf.convert_to_tensor([[dur_maxlen]])),training=False)

    mem_len = tf.shape(memory)[1].numpy()
    # Decoder input starts as a zero frame (82 dims); p_xk/p_xv are the
    # growing self-attention key/value caches for the 12 PNCA layers
    # (8 heads x 16 dims each).
    dec_inputs = tf.zeros([1,1,82])
    p_xk = [tf.zeros([1,8,1,16]) for i in range(12)]
    p_xv = [tf.zeros([1,8,1,16]) for i in range(12)]
    dec_outlist = []
    for step in range(mem_len):
        # Stage 1: combine previous output frame with this step's memory.
        dec1_output = dec1_model((dec_inputs,memory[:,step:step+1,:]),training=False)
        #print('step {} dec1 {} '.format(step,dec1_output[:,:,:6]))
        #print('step {} xqin  {} {}'.format(step,dec1_output[:,:,:5],dec1_output[:,:,-5:]))
        #print('step {} xkin  {} {}'.format(step,p_xk[0,0,step,:5],p_xk[0,0,step,-5:]))
        #print('step {} xvin  {} {}'.format(step,p_xv[0,0,step,:5],p_xv[0,0,step,-5:]))
        #print('step {} hkin  {} {}'.format(step,hdec_out1[0,0,step,:5],hdec_out1[0,0,step,-5:]))
        #print('step {} hvin  {} {}'.format(step,hdec_out2[0,0,step,:5],hdec_out2[0,0,step,-5:]))
        #print('step {} xmask  {}'.format(step,x_mask[:,step:step+1,:10]))
        #print('step {} hmask  {}'.format(step,h_mask[:,step:step+1,:10]))
        # Stage 2: run the 12 PNCA layers; each returns its updated
        # key/value cache, collected and swapped in after the layer loop.
        dec2_output = dec1_output
        x_ks = []
        x_vs = []
        for layi in range(12):
            dec2_output,x_k,x_v = dec2_pncas[layi]((dec2_output,p_xk[layi],p_xv[layi],hdec_out1[layi:layi+1,:,:,:],hdec_out2[layi:layi+1,:,:,:],x_mask[:,step:step+1,:step+1],h_mask[:,step:step+1,:]),training=False)
            x_ks.append(x_k)
            x_vs.append(x_v)
        # Output projection; the last 82 dims feed back as the next input.
        dec2_output = dec2_model((dec2_output),training=False)
        dec_inputs = dec2_output[:,:,-82:]
        #print('step {} dec2 out {} {}'.format(step,dec2_output[:,:,:5],dec2_output[:,:,-5:]))
        #print('step {} pnc out {} {}'.format(step,pnc_output[0,:,:5],pnc_output[0,:,-5:]))
        #print('step {} norm out {} {}'.format(step,norm_output[0,:,:5],norm_output[0,:,-5:]))
        #print('step {} lin out {} {}'.format(step,lin_output[0,:,:5],lin_output[0,:,-5:]))
        p_xk = x_ks
        p_xv = x_vs
        dec_outlist.append(dec2_output)
    #print('dec output {}'.format(tf.concat(dec_outlist,axis=1)))    
    return tf.concat(dec_outlist,axis=1)

def tf_mel_post(mel_dec_out,valid_len):
    """Run the postnet over the decoder output and trim to the valid length.

    mel_dec_out: decoder output, reshaped here into 82-dim frames.
    valid_len: scalar tensor holding the number of valid frames.
    Returns the post-processed mel, shape (1, valid_len, 82).
    """

    mel_dec_out = tf.reshape(mel_dec_out,(1,-1,82))
    valid_len = valid_len.numpy()[0]
    # Zero out padding frames and build a boolean mask marking valid frames.
    if valid_len < tf.shape(mel_dec_out)[1]:
        mel_dec_out = tf.concat([mel_dec_out[:,:valid_len,:],tf.zeros([1,tf.shape(mel_dec_out)[1]-valid_len,82])],axis=1)
        postmask = tf.concat([tf.ones([1,valid_len,1],dtype=tf.bool),tf.zeros([1,tf.shape(mel_dec_out)[1]-valid_len,1],dtype=bool)],axis=1)
    else:
        postmask = tf.ones([1,valid_len,1],dtype=tf.bool)
    # Four stacked postnet blocks, each taking the mask plus zero-filled
    # auxiliary state tensors.
    # NOTE(review): the [1,37,256] / [1,3,256] shapes are hard-coded — they
    # presumably match the exported sub-models' fixed state sizes; confirm
    # against the export script.
    post0_output,_ = post0_model((mel_dec_out,postmask,tf.zeros([1,37,256]),tf.zeros([1,3,256])),training=False)
    post1_output,_ = post1_model((post0_output,postmask,tf.zeros([1,37,256]),tf.zeros([1,3,256])),training=False)
    post2_output,_ = post2_model((post1_output,postmask,tf.zeros([1,37,256]),tf.zeros([1,3,256])),training=False)
    post3_output,_ = post3_model((post2_output,postmask,tf.zeros([1,37,256]),tf.zeros([1,3,256])),training=False)
    # Final postnet combines the residual with the (zero-padded) decoder
    # output; padding positions are forced back to zero before trimming.
    post_output,_,_ = post_model((post3_output,tf.zeros([1,128]),tf.zeros([1,128]),mel_dec_out),training=False)
    post_output = tf.where(postmask,post_output,tf.zeros(1))
    return post_output[:,:valid_len,:]

def denorm_f0(mel):
    """Denormalize the post-processed mel and vocode it into PCM audio."""
    denormed = dnorm_model(mel, training=False)

    # Imported lazily: the vocoder module loads its own TFLite models.
    from tts_voc_loadmul import voc_infer
    return voc_infer(denormed[0, :, :])

# Debug driver: re-run each cached AM input through the TF sub-model graph
# and concatenate the resulting PCM segments into a single wav file.
outpcms = []
for seg_idx, am_in in enumerate(model_id.voices['F7'].tmp_aminput):
    txt_hid = tf_tts_infer(am_in)
    var_out, dur_pred, valid_len = tf_variance_adaptor(am_in, txt_hid)
    mel_out = tf_mel_dec(var_out, dur_pred)
    post_mel = tf_mel_post(mel_out, valid_len)
    pcm = denorm_f0(post_mel)
    print('pcm {}'.format(pcm.shape))
    outpcms.append(pcm)
outpcms = np.concatenate(outpcms, axis=0)

import wave
with wave.open(dstpath + "/tfdebug.wav", 'wb') as wf:
    wf.setnchannels(1)      # mono
    wf.setsampwidth(2)      # 16-bit samples
    wf.setframerate(16000)  # 16 kHz, matching the basemodel_16k vocoder
    wf.writeframes(outpcms.data)
