import sys
import os
import time
import tensorflow as tf
import numpy as np
from modelscope.models.audio.tts import SambertHifigan
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Root of the fine-tuned voice model, taken from the first CLI argument.
model_dir = os.path.abspath(sys.argv[1])

# Checkpoint / config locations for the custom voice, all inside model_dir.
custom_infer_abs = {
    'voice_name': 'F7',
    'am_ckpt': os.path.join(model_dir, 'tmp_am', 'ckpt'),
    'am_config': os.path.join(model_dir, 'tmp_am', 'config.yaml'),
    'voc_ckpt': os.path.join(model_dir, 'orig_model', 'basemodel_16k',
                             'hifigan', 'ckpt'),
    'voc_config': os.path.join(model_dir, 'orig_model', 'basemodel_16k',
                               'hifigan', 'config.yaml'),
    'audio_config': os.path.join(model_dir, 'data', 'audio_config.yaml'),
    'se_file': os.path.join(model_dir, 'data', 'se', 'se.npy'),
}
kwargs = {'custom_ckpt': custom_infer_abs}

# Build the TTS model from the base checkpoints plus the custom-voice files
# declared above, then run the full pipeline once per transcript line so the
# model caches its AM inputs (read back later via voices['F7'].tmp_aminput).
model_id = SambertHifigan(os.path.join(model_dir, "orig_model"), **kwargs)

inference = pipeline(task=Tasks.text_to_speech, model=model_id, device='cpu')
srctxt = sys.argv[2]   # transcript file: one "<utt_id> <text>" pair per line
dstpath = sys.argv[3]  # output directory for synthesized wav files
os.makedirs(dstpath, exist_ok=True)  # fix: output dir may not exist yet
with open(srctxt, 'r') as f:
    alltxt = f.readlines()
for txtline in alltxt:
    fields = txtline.strip().split()
    if len(fields) < 2:
        # fix: blank or malformed lines used to raise IndexError
        continue
    start = time.time()
    # NOTE(review): only the token right after the utterance id is synthesized,
    # so multi-word text past the first space is dropped — confirm intended.
    output = inference(input=fields[1])
    print('spend time {}'.format(time.time()-start))
    wav = output[OutputKeys.OUTPUT_WAV]
    with open(os.path.join(dstpath, fields[0] + ".wav"), 'wb') as wavf:
        wavf.write(wav)

# Dump the speaker embedding (first row of se.npy) as raw little-endian bytes
# for the TFLite runtime to load.
se = np.load(custom_infer_abs['se_file'])
# fix: this subdirectory was never created before writing se.bin (only
# tfmodel_path/tflite is created below), so the open() could fail.
os.makedirs(os.path.join(dstpath, 'tflite'), exist_ok=True)
with open(os.path.join(dstpath, 'tflite/se.bin'), 'wb') as f:
    # One bulk write instead of a per-element loop; byte layout is identical
    # for a contiguous array.
    f.write(se[0].tobytes())

tfmodel_path = sys.argv[4]  # directory holding the exported TF SavedModels
# Converted .tflite files are written alongside them under <path>/tflite.
if not os.path.exists(tfmodel_path+"/tflite"):
    os.makedirs(tfmodel_path+"/tflite")
allstart = time.time()

def save_tflite_model(tf_model,tf_path):
    """Convert the SavedModel at ``tf_model`` to TFLite and write it to ``tf_path``.

    Flex delegate ops (SELECT_TF_OPS) are enabled so graph ops without a
    TFLite builtin still run. Returns the serialized flatbuffer bytes.
    """
    conv = tf.lite.TFLiteConverter.from_saved_model(tf_model)
    conv.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,  # TensorFlow Lite builtin ops
        tf.lite.OpsSet.SELECT_TF_OPS,    # fall back to full TensorFlow ops
    ]
    flatbuffer = conv.convert()
    with open(tf_path,'wb') as fout:
        fout.write(flatbuffer)
    return flatbuffer
def save_tflite_model1(tf_model,tf_path):
    """Convert a SavedModel to TFLite at ``tf_path`` and return the bytes.

    The original body was a byte-for-byte copy of ``save_tflite_model``;
    delegate to it instead of duplicating the converter setup. Kept as a
    separate name for call-site compatibility.
    """
    return save_tflite_model(tf_model, tf_path)
def save_tflite_model2(tf_model,tf_path):
    """Convert a SavedModel to a size-optimized TFLite model.

    Same converter setup as ``save_tflite_model`` but with default
    optimizations and float16 weight quantization enabled. Writes the
    flatbuffer to ``tf_path`` and returns it.
    """
    conv = tf.lite.TFLiteConverter.from_saved_model(tf_model)
    conv.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,  # TensorFlow Lite builtin ops
        tf.lite.OpsSet.SELECT_TF_OPS,    # fall back to full TensorFlow ops
    ]
    conv.optimizations = [tf.lite.Optimize.DEFAULT]
    conv.target_spec.supported_types = [tf.float16]
    flatbuffer = conv.convert()
    with open(tf_path,'wb') as fout:
        fout.write(flatbuffer)
    return flatbuffer
def tf_tts_infer(am_input):
    """Run the text-encoder stage: convert txtenc to TFLite and execute it.

    ``am_input[0]`` is the linguistic token tensor (a torch tensor, moved to
    CPU here). Returns the 'text_fft_encoder' output of the encoder.
    """
    inputs_ling = am_input[0].cpu().numpy()

    saved_model = tfmodel_path+'/tfmodel/txtenc'
    lite_file = saved_model.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model(saved_model, lite_file)

    runner = tf.lite.Interpreter(lite_file).get_signature_runner('serving_default')
    t0 = time.time()
    outputs = runner(text_in=tf.convert_to_tensor(inputs_ling,dtype=tf.int32))
    print('txtenc time {}'.format(time.time()-t0))

    enc_out = outputs['text_fft_encoder']
    print('enc_out {} {}'.format(enc_out,enc_out.shape))
    return enc_out

# Stage 1: encode the cached AM input of voice 'F7' (captured by the pipeline
# runs in the synthesis loop above).
txt_embed = tf_tts_infer(model_id.voices['F7'].tmp_aminput[0])
#txt_embed = None

def tf_variance_adaptor(am_input, txt_hid):
    """Run the variance-adaptor pipeline via TFLite models.

    Stages: emotion embedding -> variance adaptor -> autoregressive duration
    RNN -> length regulator (txt/emo/spk) -> duration positional encoding ->
    memory fusion. Each stage is converted from a SavedModel under
    ``tfmodel_path`` and executed through its 'serving_default' signature.

    Args:
        am_input: cached AM input tuple; [1] is the emotion id tensor and
            [2] the speaker embedding (both torch tensors — TODO confirm).
        txt_hid: text-encoder output from ``tf_tts_infer``.

    Returns:
        (memout, duration_predictions, LR_txt_len): fused decoder memory,
        per-phone duration predictions, and the length-regulator output
        length used later to trim the mel.
    """

    #inputs_emotion = am_input[1].cpu().numpy()
    #inputs_emotion = tf.convert_to_tensor(inputs_emotion)
    #emo_hid = emot_model((inputs_emotion),training=False)

    # --- emotion embedding ---
    model_path = tfmodel_path+'/tfmodel/emot'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)

    inputs_emotion = am_input[1].cpu().numpy()
    inputs_emotion = tf.convert_to_tensor(inputs_emotion,dtype=tf.int32)
    #emo_hid = emot_model((inputs_emotion),training=False)
    emo_model = tf.lite.Interpreter(tflite_path)
    sig = emo_model.get_signature_runner('serving_default')
    start = time.time()
    sig_out = sig(args_0 = inputs_emotion)
    print('emo time {}'.format(time.time()-start))
    emo_hid = sig_out['emot_emd']
    print('emot hid {}'.format(emo_hid))

    # Speaker embedding is passed through unchanged.
    spk_hid = tf.convert_to_tensor(am_input[2].cpu().numpy())
    print('spk hid  {}'.format(spk_hid))

    # --- variance adaptor: fuse text/emotion/speaker into the augmented text
    # embedding and the duration-predictor conditioning sequence ---
    model_path = tfmodel_path+'/tfmodel/varadp'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)

    varadp_model1 = tf.lite.Interpreter(tflite_path)
    sig = varadp_model1.get_signature_runner('serving_default')
    start = time.time()
    sig_out = sig(emo_in=emo_hid,spk_in=spk_hid,txt_in=txt_hid)
    print('varadp1 time {}'.format(time.time()-start))
    txt_emb_aug = sig_out['variance_adaptor1']
    input_pred_con = sig_out['variance_adaptor1_1']
    print('txt aug {} {}'.format(tf.shape(txt_emb_aug), txt_emb_aug))
    print('input pre {} {}'.format(tf.shape(input_pred_con), input_pred_con))

    # --- duration predictor: 2-layer LSTM (128 units each) run step by step,
    # threading (x, h0, c0, h1, c1) state through the signature manually ---
    model_path = tfmodel_path+'/tfmodel/duration'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)
    duration_model = tf.lite.Interpreter(tflite_path)
    sig = duration_model.get_signature_runner('serving_default')

    cond_len = tf.shape(input_pred_con).numpy()[1]
    pre_x = tf.zeros([1,1,1])
    pre_h0 = tf.zeros([1,128])
    pre_c0 = tf.zeros([1,128])
    pre_h1 = tf.zeros([1,128])
    pre_c1 = tf.zeros([1,128])
    dura_output = []
    start = time.time()
    for il in range(cond_len):
        #out_x, h0,c0,h1,c1=duration_model((pre_x,input_pred_con[:,il:il+1,:],pre_h0,pre_c0,pre_h1,pre_c1),training=False)
        sig_out = sig(cond = input_pred_con[:,il:il+1,:],p_c0=pre_c0,p_c1=pre_c1,p_h0=pre_h0,p_h1=pre_h1,txt_in=pre_x)
        out_x=sig_out['var_rnn_ar_predictor']
        h0=sig_out['var_rnn_ar_predictor_1']
        c0=sig_out['var_rnn_ar_predictor_2']
        h1=sig_out['var_rnn_ar_predictor_3']
        c1=sig_out['var_rnn_ar_predictor_4']
        out_logx=sig_out['var_rnn_ar_predictor_5']
        # out_logx is collected per step; out_x feeds the next step's input.
        dura_output.append(out_logx)
        pre_x = out_x
        pre_h0 = h0
        pre_c0 = c0
        pre_h1 = h1
        pre_c1 = c1
    duration_predictions = tf.concat(dura_output,axis=1)
    #duration_predictions = tf.math.exp(log_duration_predictions)-1
    #print('duration time {}'.format(time.time()-start))
    print('duration_predictions {}'.format(duration_predictions))

    # The commented classes below are the reference Keras implementations of
    # the LR / position-encoder ops, kept for comparison with the TFLite path.
    #class  LengthRegulator(tf.keras.layers.Layer):
    #    def __init__(self,r, **kwargs):
    #        super(LengthRegulator, self).__init__(**kwargs)
    #        self.r = r

    #    def call(self, inputs, durations):
    #        durations = tf.squeeze(durations,axis=-1)
    #        reps = durations + 0.5
    #        reps = tf.cast(reps, dtype=tf.int64)
    #        output_lens = tf.math.reduce_sum(reps,axis=1)
    #        max_len = tf.math.reduce_max(output_lens)
    #        paddings = tf.constant([[0, 0,], [1, 0]])
    #        reps_pad = tf.pad(reps,paddings)
    #        reps_cumsum = tf.expand_dims(tf.math.cumsum(reps_pad,axis=1),axis=1)
    #        range_ = tf.expand_dims(tf.expand_dims(tf.range(max_len),axis=-1),axis=0)
    #        range_f = tf.cast(range_, dtype=tf.float32)
    #        mult = (reps_cumsum[:, :, :-1] <= range_) & (reps_cumsum[:, :, 1:] > range_)
    #        mult = tf.cast(mult, dtype=tf.float32)
    #        out = tf.matmul(mult, inputs)
    #        seq_len = tf.shape(out)[1]
    #        padding_len = (self.r - (seq_len % self.r)) % self.r
    #        padding_out = tf.zeros([1,padding_len,tf.shape(out)[2]])
    #        out  = tf.concat([out,padding_out],axis=1)
    #        return out,output_lens

    #class  DurSinusoidalPositionEncoder(tf.keras.layers.Layer):
    #    def __init__(self, depth, outputs_per_step, **kwargs):
    #        super(DurSinusoidalPositionEncoder, self).__init__(**kwargs)
    #        self.depth = depth
    #        self.outputs_per_step = outputs_per_step
    #        #inv_timescales = [np.power(10000, 2 * (hid_idx // 2) / depth) for hid_idx in range(depth)]
    #        #self.inv_timescales = tf.convert_to_tensor(inv_timescales, dtype=tf.float32)
    #        div_term = tf.math.exp(tf.range(0, self.depth, 2, dtype=tf.float32)*-1*(tf.math.log(10000.0)/self.depth))
    #        self.inv_timescales = tf.expand_dims(div_term,axis=0)

    #    def call(self, durations):
    #        durations = tf.squeeze(durations,axis=-1)
    #        reps_f = durations + 0.5
    #        reps = tf.cast(reps_f, dtype=tf.int64)
    #        output_lens = tf.math.reduce_sum(reps,axis=1)
    #        max_len = tf.math.reduce_max(output_lens)
    #        paddings = tf.constant([[0, 0,], [1, 0]])
    #        reps_pad = tf.pad(reps,paddings)
    #        reps_cumsum = tf.expand_dims(tf.math.cumsum(reps_pad,axis=1),axis=1)
    #        range_ = tf.expand_dims(tf.expand_dims(tf.range(max_len),axis=-1),axis=0)
    #        mult = (reps_cumsum[:, :, :-1] <= range_) & (reps_cumsum[:, :, 1:] > range_)
    #        mult = tf.cast(mult, dtype=tf.float32)
    #        reps_cumsum = tf.cast(reps_cumsum, dtype=tf.float32)

    #        offsets = tf.squeeze(tf.matmul(mult, tf.expand_dims(reps_cumsum[:, 0, :-1],axis=-1)),axis=-1)
    #        range_f = tf.cast(range_, dtype=tf.float32)
    #        dur_pos = range_f[:, :, 0] - offsets + 1
    #        seq_len = tf.shape(dur_pos)[1]
    #        padding_len = (self.outputs_per_step - (seq_len % self.outputs_per_step)) % self.outputs_per_step
    #        #paddings = tf.constant([[0, 0,], [0, padding_len],[0,0]])
    #        #dur_pos = tf.pad(dur_pos,paddings)
    #        dur_pad = tf.zeros([1,padding_len],dtype=tf.float32)
    #        dur_pos = tf.concat([dur_pos,dur_pad],axis=-1)
    #        dur_pos = tf.expand_dims(dur_pos,axis=-1)
    #        return dur_pos,self.inv_timescales
    #LR_inputs = tf.keras.Input(shape=(None,None),batch_size=1, name='lr_inputs')
    #duration_pred = tf.keras.Input(shape=(None,1),batch_size=1, name='duration_pred')
    #LR_out,LR_len = LengthRegulator(3)(LR_inputs,duration_pred)
    #LR_model = tf.keras.Model((LR_inputs,duration_pred),(LR_out,LR_len))
    #LR_model.summary()

    # --- length regulator: expand txt/emo/spk sequences to frame rate using
    # the predicted durations; the same runner serves all three inputs ---
    start = time.time()
    #print('txt emb {}'.format(txt_emb_aug.shape))
    #print('duration  {}'.format(duration_predictions))
    #LR_txt_out,LR_txt_len = LR_model((txt_emb_aug,duration_predictions),training=False)
    model_path = tfmodel_path+'/tfmodel/LR'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)

    sig = tf.lite.Interpreter(tflite_path).get_signature_runner('serving_default')
    sig_out = sig(lr_inputs=txt_emb_aug,duration_pred=duration_predictions)
    LR_txt_out = sig_out['length_regulator']
    LR_txt_len = sig_out['length_regulator_1']
    print('lr txt {} {} {}'.format(LR_txt_len,LR_txt_out,LR_txt_out.shape))
    sig_out = sig(lr_inputs=emo_hid,duration_pred=duration_predictions)
    LR_emo_out = sig_out['length_regulator']
    sig_out = sig(lr_inputs=spk_hid,duration_pred=duration_predictions)
    LR_spk_out = sig_out['length_regulator']

    #duration_pred = tf.keras.Input(shape=(None,1),batch_size=1, name='duration_pred')
    #dur_pos,inv_timescales = DurSinusoidalPositionEncoder(32,3)(duration_pred)
    #dursin_model = tf.keras.Model((duration_pred),(dur_pos,inv_timescales))
    #dursin_model.summary()

    #dur_pos_out,inv_scale_out = dursin_model((duration_predictions),training=False)
    #pe_pos = tf.math.multiply(dur_pos_out,inv_scale_out)
    #pe_even = tf.expand_dims(tf.math.sin(pe_pos),-1)
    #pe_odd = tf.expand_dims(tf.math.cos(pe_pos),-1)
    #pe = tf.concat([pe_even,pe_odd],axis=-1)
    #position_embedding = tf.reshape(pe,(-1,32))

    # --- duration-based sinusoidal position encoding added to the expanded
    # text features ---
    model_path = tfmodel_path+'/tfmodel/dursin'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)

    sig = tf.lite.Interpreter(tflite_path).get_signature_runner('serving_default')
    sig_out = sig(duration_pred=duration_predictions,lr_txt_in=LR_txt_out)
    LR_txt_out = sig_out['dur_sinusoidal_position_encoder']
    #print('dursin {} {}'.format(LR_txt_out,LR_txt_out.shape))

    # --- memory fusion: combine the three frame-rate streams into the
    # decoder memory ---
    model_path = tfmodel_path+'/tfmodel/mem'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)
    sig = tf.lite.Interpreter(tflite_path).get_signature_runner('serving_default')
    sig_out = sig(txt_in=LR_txt_out,spk_in=LR_spk_out,emo_in=LR_emo_out)
    memout = sig_out['txtmem']

    print('memout {} {}'.format(memout,memout.shape))
    print('dura {}'.format(duration_predictions))
    return memout,duration_predictions,LR_txt_len
    #return None,None

# Stage 2: variance adaptor over the cached AM input and the text encoding.
tf_var_out,dur_prdiction,LR_length_rounded = tf_variance_adaptor(model_id.voices['F7'].tmp_aminput[0],txt_embed)

def tf_mel_dec(memory,dur_pred):
    """Autoregressive mel decoder driven by TFLite sub-models.

    Pipeline: attenH precomputes per-layer K/V for the "H" attention branch
    over the whole memory; getmask builds the banded PNCA attention masks;
    then, per memory step, meldec1 -> 12 dec2pnca attention layers -> meldec2
    produce one decoder output whose last 82 dims feed the next step.

    Args:
        memory: decoder memory from ``tf_variance_adaptor``, (1, T, dim).
        dur_pred: duration predictions, used to size the attention bands.

    Returns:
        The concatenated decoder outputs over all T steps, shape (1, T, D).
    """
    # --- attenH: per-layer keys/values of the hybrid attention decoder ---
    model_path = tfmodel_path+'/tfmodel/attenH'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model(model_path,tflite_path)

    hdec_model = tf.lite.Interpreter(tflite_path)
    sig = hdec_model.get_signature_runner('serving_default')
    start = time.time()
    sig_out = sig(mem_in = memory)
    print('attenH time {}'.format(time.time()-start))
    hdec_out1 = sig_out['hybrid_attention_decoder_h']    # keys, indexed [layer]
    hdec_out2 = sig_out['hybrid_attention_decoder_h_1']  # values, indexed [layer]

    # --- getmask: banded PNCA attention masks; band width derives from the
    # longest predicted duration (3 output frames per decoder step) ---
    dur_maxlen = int(tf.math.reduce_max(dur_pred)/3+0.5)
    mem_len = tf.shape(memory)[1]
    model_path = tfmodel_path+'/tfmodel/getmask'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)
    getmask_model = tf.lite.Interpreter(tflite_path)
    sig = getmask_model.get_signature_runner('serving_default')
    # fix: reset the timer here — previously 'getmask time' was measured from
    # the attenH start and included the TFLite conversion above.
    start = time.time()
    sig_out = sig(max_in=tf.convert_to_tensor([[mem_len]]),x_in=tf.convert_to_tensor([[dur_maxlen]]),h_in=tf.convert_to_tensor([[dur_maxlen]]))
    x_mask = sig_out['getmask']
    h_mask = sig_out['getmask_1']
    print('getmask time {}'.format(time.time()-start))
    print('dur max {}'.format(dur_maxlen))
    print('x max {} {}'.format(x_mask.shape,x_mask))

    # --- convert the per-step sub-models once, up front ---
    model_path = tfmodel_path+'/tfmodel/meldec1'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)
    dec1_model = tf.lite.Interpreter(tflite_path)
    sig_dec1 = dec1_model.get_signature_runner('serving_default')

    dec2_pncas = []
    for layi in range(12):
        model_path = tfmodel_path+'/tfmodel/dec2pnca_{}'.format(layi)
        tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
        save_tflite_model(model_path,tflite_path)
        dec2_pnca_model = tf.lite.Interpreter(tflite_path)
        dec2_pncas.append(dec2_pnca_model)
    # Hoisted out of the decode loop: the runners were previously re-created
    # 12 times per step.
    sig_pncas = [m.get_signature_runner('serving_default') for m in dec2_pncas]

    model_path = tfmodel_path+'/tfmodel/meldec2'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)
    dec2_model = tf.lite.Interpreter(tflite_path)
    sig_dec2 = dec2_model.get_signature_runner('serving_default')

    # --- autoregressive decode loop ---
    mem_len = tf.shape(memory)[1].numpy()
    dec_inputs = tf.zeros([1,1,82])          # previous frame fed back in
    p_xk = [tf.zeros([1,8,1,16]) for i in range(12)]  # per-layer self-attn key cache
    p_xv = [tf.zeros([1,8,1,16]) for i in range(12)]  # per-layer self-attn value cache
    dec_outlist = []
    start = time.time()
    for step in range(mem_len):
        sig_dec1_out = sig_dec1(mel_in=dec_inputs,mem_in=memory[:,step:step+1,:])
        dec1_output = sig_dec1_out['hybrid_attention_decoder_1']
        dec2_output = dec1_output
        x_ks = []
        x_vs = []
        for layi in range(12):
            # Each layer attends over its cached x K/V (grown per step) and
            # the precomputed h K/V for this layer, under the banded masks.
            sig_pnca_out = sig_pncas[layi](x_q_in=dec2_output,x_k_in=p_xk[layi],x_v_in=p_xv[layi],h_k_in=hdec_out1[layi:layi+1,:,:,:],h_v_in=hdec_out2[layi:layi+1,:,:,:],x_mask_in=x_mask[:,step:step+1,:step+1],h_mask_in=h_mask[:,step:step+1,:])
            dec2_output=sig_pnca_out['dec2pnca_{}'.format(layi)]
            x_ks.append(sig_pnca_out['dec2pnca_{}_1'.format(layi)])
            x_vs.append(sig_pnca_out['dec2pnca_{}_2'.format(layi)])
        sig_dec2_out = sig_dec2(pnca_in=dec2_output)
        dec2_output=sig_dec2_out['hybrid_attention_decoder_2']
        dec_inputs = dec2_output[:,:,-82:]   # feed back the last 82 dims
        p_xk = x_ks
        p_xv = x_vs
        dec_outlist.append(dec2_output)
    print('decoder time {}'.format(time.time()-start))
    print('dec output {}'.format(tf.concat(dec_outlist,axis=1)))
    return tf.concat(dec_outlist,axis=1)


# Stage 3: decode mel frames from the variance-adaptor memory.
mel_output = tf_mel_dec(tf_var_out,dur_prdiction)

def tf_mel_post(mel_dec_out,valid_len):
    """Refine the decoded mel with the post-net stack, masked to valid length.

    Runs four convolutional stages (post0..post3) followed by the final
    recurrent stage ('post', which consumes the residual ``post_res``),
    zeroes padded frames, and trims to ``valid_len``.

    Args:
        mel_dec_out: decoder output, reshaped here to (1, T, 82).
        valid_len: 1-element tensor from the length regulator.

    Returns:
        The refined mel, (1, valid_len, 82).
    """
    mel_dec_out = tf.reshape(mel_dec_out,(1,-1,82))
    valid_len = valid_len[0]
    # Zero the padded tail and build the boolean frame mask.
    if valid_len < tf.shape(mel_dec_out)[1]:
        mel_dec_out = tf.concat([mel_dec_out[:,:valid_len,:],tf.zeros([1,tf.shape(mel_dec_out)[1]-valid_len,82])],axis=1)
        print('mel dec {}'.format(mel_dec_out))
        postmask = tf.concat([tf.ones([1,valid_len,1],dtype=tf.bool),tf.zeros([1,tf.shape(mel_dec_out)[1]-valid_len,1],dtype=bool)],axis=1)
    else:
        postmask = tf.ones([1,valid_len,1],dtype=tf.bool)

    # The four convolutional post-net stages share one signature; run them in
    # a loop instead of four copy-pasted stanzas (behavior unchanged).
    post_output = mel_dec_out
    for stage in range(4):
        model_path = tfmodel_path+'/tfmodel/post{}'.format(stage)
        tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
        save_tflite_model(model_path,tflite_path)
        sig = tf.lite.Interpreter(tflite_path).get_signature_runner('serving_default')
        sig_out = sig(post_in=post_output,mask=postmask,lpadding=tf.zeros([1,37,256]),rpadding=tf.zeros([1,3,256]))
        post_output = sig_out['post_net{}'.format(stage)]

    # Final recurrent stage; the residual mel is added via post_res.
    model_path = tfmodel_path+'/tfmodel/post'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model(model_path,tflite_path)
    sig = tf.lite.Interpreter(tflite_path).get_signature_runner('serving_default')

    start = time.time()
    sig_out = sig(post_in=post_output,p_h=tf.zeros([1,128]),p_c=tf.zeros([1,128]),post_res=mel_dec_out)
    print('post time {}'.format(time.time()-start))
    post_output = sig_out['post_net']
    # Zero out padded frames, then trim to the valid length.
    post_output = tf.where(postmask,post_output,tf.zeros(1))
    print('valid_len {}'.format(valid_len))
    print('post out {} {}'.format(post_output[:,:valid_len,:].shape,post_output[:,:valid_len,:]))
    return post_output[:,:valid_len,:]

# Stage 4: post-net refinement, trimmed to the length-regulator output length.
post_mel = tf_mel_post(mel_output,LR_length_rounded)
# Total wall time for the whole TFLite conversion + inference chain.
print('spend time {}'.format(time.time()-allstart))

def denorm_f0(mel):
    """Denormalize F0/voicing in the mel via the 'dnorm' TFLite model, then
    vocode the result and write a 16 kHz mono debug wav to ``dstpath``.

    The commented class below is the reference Keras implementation of the
    denormalization op, kept for comparison with the TFLite path.
    """
    #class  DenormF0(tf.keras.layers.Layer):
    #    def __init__(self, **kwargs):
    #        super(DenormF0, self).__init__(**kwargs)
    #        self.f0_global_max_min = [730.0,30.0]
    #        self.threshold=0.6
    #        self.f0_threshold=30.0

    #    def call(self,mel):
    #        mask = mel[:,:,-1:] < self.threshold
    #        vflag = tf.where(mask,tf.zeros([1]),tf.ones([1]))

    #        f0 = mel[:,:,-2:-1] * (self.f0_global_max_min[0] - self.f0_global_max_min[1]) + self.f0_global_max_min[1]
    #        f0mask = f0 < self.f0_threshold
    #        f0val = tf.where(f0mask,tf.convert_to_tensor([self.f0_threshold]),f0)

    #        res_mel = tf.concat([mel[:,:,:-2],f0val,vflag],axis=-1)
    #        return res_mel
    model_path = tfmodel_path+'/tfmodel/dnorm'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    save_tflite_model1(model_path,tflite_path)
    post_model = tf.lite.Interpreter(tflite_path)
    sig = post_model.get_signature_runner('serving_default')
    sig_out = sig(norm_in=mel)
    res_mel = sig_out['dnorm']
    print('dnorm {}'.format(res_mel))

    #print('res mel {}'.format(res_mel))
    #from tts_voc_tfmul import voc_infer
    # Local import: project vocoder module, only needed for this debug path.
    from tts_voc_tflite_mul import voc_infer
    pcm = voc_infer(res_mel[0,:,:])
    import wave
    # 16-bit mono PCM at 16 kHz.
    with wave.open(dstpath+"/tflitedebug.wav", 'wb') as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(16000)
        wf.writeframes(pcm.data)
    return res_mel
# Stage 5: denormalize F0 and vocode the post-net mel (writes tflitedebug.wav).
denorm_f0(post_mel)
#print('voc para')
#model_para = model_id.voices['F7'].voc_model.state_dict()
#for k,v in model_para.items():
#     print("{:20s} {}".format(k, v.shape))
