
import sys
import os
import time
import tensorflow as tf
import numpy as np
from modelscope.models.audio.tts import SambertHifigan
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

def load_pretrain_model(pretrain_dir,dst_path):
    """Load a pretrained Sambert-Hifigan TTS checkpoint and return its AM weights.

    Builds the modelscope ``SambertHifigan`` model from *pretrain_dir*, runs one
    throwaway synthesis (presumably needed to fully materialize the voice
    models before reading their weights — TODO confirm), dumps the speaker
    embedding ``se.npy`` as raw binary ``se.bin`` under *dst_path*, and returns
    the acoustic model's PyTorch ``state_dict`` for voice 'F7'.

    Args:
        pretrain_dir: checkpoint root containing ``tmp_am``, ``orig_model``
            and ``data`` subdirectories.
        dst_path: existing output directory that receives ``se.bin``.

    Returns:
        The acoustic model's ``state_dict`` (maps parameter names to tensors).
    """
    model_dir = os.path.abspath(pretrain_dir)
    
    # Absolute paths to every checkpoint/config piece the custom loader needs.
    custom_infer_abs = {
        'voice_name':
        'F7',
        'am_ckpt':
        os.path.join(model_dir, 'tmp_am', 'ckpt'),
        'am_config':
        os.path.join(model_dir, 'tmp_am', 'config.yaml'),
        'voc_ckpt':
        os.path.join(model_dir, 'orig_model', 'basemodel_16k', 'hifigan', 'ckpt'),
        'voc_config':
        os.path.join(model_dir, 'orig_model', 'basemodel_16k', 'hifigan',
                 'config.yaml'),
        'audio_config':
        os.path.join(model_dir, 'data', 'audio_config.yaml'),
        'se_file':
        os.path.join(model_dir, 'data', 'se', 'se.npy')
    }
    kwargs = {'custom_ckpt': custom_infer_abs}
    
    model_id = SambertHifigan(os.path.join(model_dir, "orig_model"), **kwargs)

    # Dummy inference; the result itself is discarded.
    inference = pipeline(task=Tasks.text_to_speech, model=model_id, device='cpu')
    output = inference(input="我爱北京天安门")
    am_model_para = model_id.voices['F7'].am.state_dict()
    #for k,v in am_model_para.items():
    #     print("{:20s} {}".format(k, v.shape))

    # Dump the first speaker-embedding row element by element as raw bytes.
    se = np.load(custom_infer_abs['se_file'])
    with open(os.path.join(dst_path,'se.bin'),'wb') as f:
        for sev in se[0]:
            f.write(sev)
    return am_model_para

def save_tflite_model_int8(tf_model,tf_path):
    """Convert a Keras model to TFLite with default quantization and save it.

    NOTE(review): despite the ``int8`` name, no representative dataset is
    supplied, so ``Optimize.DEFAULT`` yields dynamic-range quantization rather
    than full integer quantization — confirm this is intended.

    Args:
        tf_model: the ``tf.keras.Model`` to convert.
        tf_path: destination path for the ``.tflite`` flatbuffer.
    """
    cvt = tf.lite.TFLiteConverter.from_keras_model(tf_model)
    # Fall back to full TensorFlow ops for anything the TFLite builtin
    # op set cannot express.
    cvt.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,
        tf.lite.OpsSet.SELECT_TF_OPS,
    ]
    cvt.optimizations = [tf.lite.Optimize.DEFAULT]
    flatbuffer = cvt.convert()
    with open(tf_path,'wb') as out:
        out.write(flatbuffer)
def save_tflite_model(tf_model,tf_path):
    """Convert a Keras model to an unquantized TFLite flatbuffer and save it.

    Args:
        tf_model: the ``tf.keras.Model`` to convert.
        tf_path: destination path for the ``.tflite`` flatbuffer.
    """
    cvt = tf.lite.TFLiteConverter.from_keras_model(tf_model)
    # Fall back to full TensorFlow ops for anything the TFLite builtin
    # op set cannot express.
    cvt.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,
        tf.lite.OpsSet.SELECT_TF_OPS,
    ]
    flatbuffer = cvt.convert()
    with open(tf_path,'wb') as out:
        out.write(flatbuffer)
### position wise
class PositionwiseConvFeedForward(tf.keras.layers.Layer):
    """Pre-norm position-wise feed-forward block built from two Conv1D layers.

    Computes ``x + w2(relu(w1(layernorm(x))))`` — LayerNorm first, then a
    widening conv with ReLU, a narrowing conv back to ``d_in``, and a
    residual connection.
    """
    def __init__(self, d_in, d_hid, kernel_size=(3, 1), **kwargs):
        super(PositionwiseConvFeedForward, self).__init__(**kwargs)
        self.w_1 = tf.keras.layers.Conv1D(
            d_hid, kernel_size[0], strides=1, padding='same',
            use_bias=True, name='w1')
        self.w_2 = tf.keras.layers.Conv1D(
            d_in, kernel_size[1], strides=1, padding='same',
            use_bias=True, name='w2')
        self.norm = tf.keras.layers.LayerNormalization(
            epsilon=1e-6, name='pos_norm')

    def call(self, x):
        """Apply the feed-forward transform and add the residual."""
        hidden = self.norm(x)
        hidden = self.w_2(tf.nn.relu(self.w_1(hidden)))
        return x + hidden

def tf_tts_infer(am_model_para, dst_tflite_path):

    class SinusoidalPositionEncoder(tf.keras.layers.Layer):
        """Additive sinusoidal position encoding, precomputed up to max_len.

        The table concatenates sin over the first depth/2 channels and cos
        over the last depth/2 (half-blocks, not interleaved), with positions
        starting at 1.
        """
        def __init__(self, max_len, depth, **kwargs):
            super(SinusoidalPositionEncoder, self).__init__(**kwargs)
            half = depth / 2
            log_step = -tf.math.log(10000.0) / (half - 1)
            inv_freq = tf.math.exp(tf.range(0, half, dtype=tf.float32) * log_step)
            positions = tf.range(1, max_len + 1, dtype=tf.float32)
            angles = tf.reshape(positions, (1, -1, 1)) * tf.reshape(inv_freq, (1, 1, -1))
            # Shape (1, max_len, depth): sin half followed by cos half.
            self.position_enc = tf.concat(
                [tf.math.sin(angles), tf.math.cos(angles)], axis=-1)

        def call(self, input):
            """Add the positional table, truncated to the input's time length."""
            steps = tf.shape(input)[1]
            return input + self.position_enc[:, :steps, :]

    ### multihead attention
    class MultiHeadAttention_0(tf.keras.layers.Layer):
        """Pre-norm multi-head self-attention WITHOUT a residual connection.

        Used as the first FFT block, where the input width differs from
        d_model so a residual add is not shape-compatible.  Q, K and V come
        from a single fused Dense of width 3*d_model.
        """
        def __init__(self, in_feat, d_model, num_heads,**kwargs):
            # NOTE(review): in_feat is accepted but unused; kept for
            # signature compatibility with the call sites.
            super(MultiHeadAttention_0, self).__init__(**kwargs)
            self.num_heads = num_heads
            self.d_model = d_model
            self.depth = d_model//self.num_heads
            self.norm = tf.keras.layers.LayerNormalization(epsilon=1e-6) 
            self.linear_q_k_v = tf.keras.layers.Dense(d_model*3)
            self.linear_out = tf.keras.layers.Dense(d_model)

        def forward_qkv(self, x,batch_size):
            # Project once, then split the channel axis into Q, K, V.
            q_k_v = self.linear_q_k_v(x)
            q,k,v = tf.split(q_k_v,num_or_size_splits=3,axis=-1)

            q_h = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
            k_h = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
            v_h = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)
            return q_h, k_h, v_h, v

        def split_heads(self, x, batch_size):
            """Split the last dimension into (num_heads, depth) and transpose
            so the result has shape (batch_size, num_heads, seq_len, depth).
            """
            x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
            return tf.transpose(x, perm=[0, 2, 1, 3])

        def scaled_dot_product_attention(self,q, k, v):
            """Compute scaled dot-product attention.

            q, k, v must have matching leading dimensions, and k, v matching
            second-to-last dimensions (seq_len_k == seq_len_v).  All masking
            has been stripped out (see the commented lines) — presumably
            because the exporter only runs single, unpadded sequences;
            confirm before reusing with padded batches.

            Args:
                q: queries, shape (..., seq_len_q, depth)
                k: keys,    shape (..., seq_len_k, depth)
                v: values,  shape (..., seq_len_v, depth_v)

            Returns:
                The attention output, shape (..., seq_len_q, depth_v).
                """
            #revmask = tf.constant(0,dtype=tf.bool)
            #mask = tf.expand_dims(tf.math.equal(revmask,tfone), axis=1)

            matmul_qk = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)

            # Scale matmul_qk by sqrt(depth of k).
            dk = tf.cast(tf.shape(k)[-1], tf.float32)
            scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)

            # Masking (disabled).
            #scaled_attention_logits = tf.where(mask,1e-10,scaled_attention_logits)
            #scaled_attention_logits += (mask * -1e9)

            # Softmax over the last axis (seq_len_k), so each query's
            # weights sum to 1.
            attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)  # (..., seq_len_q, seq_len_k)
            #attention_weights = tf.where(mask,0.0,attention_weights)

            output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)

            return output

        #def call(self, q, k, v, mask):
        def call(self, x):
            batch_size = tf.shape(x)[0]

            # Pre-norm: normalize before the attention projections.
            x = self.norm(x)

            q_h, k_h, v_h, v = self.forward_qkv(x,batch_size)

            # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
            # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
            scaled_attention = self.scaled_dot_product_attention(
                  q_h, k_h, v_h)

            scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)

            concat_attention = tf.reshape(scaled_attention, 
                  (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)

            output = self.linear_out(concat_attention)  # (batch_size, seq_len_q, d_model)

            # No residual here — input width may differ from d_model.
            return output

    ### multihead attention
    class MultiHeadAttention_1(MultiHeadAttention_0):
        """Pre-norm multi-head self-attention WITH a residual connection.

        The previous implementation was a verbatim copy of
        MultiHeadAttention_0 differing only in the final ``+ residual``;
        it now subclasses it instead.  The sublayers (norm, fused QKV Dense,
        output Dense) are created by the parent in the same order, so the
        positional weight ordering seen by ``set_weights`` is unchanged.
        """

        def __init__(self, in_feat, d_model, num_heads, **kwargs):
            # Same signature as before; in_feat remains unused (see parent).
            super(MultiHeadAttention_1, self).__init__(in_feat, d_model, num_heads, **kwargs)

        def call(self, x):
            """Return ``attention(x) + x``.

            MultiHeadAttention_0.call computes
            ``linear_out(softmax(qk/sqrt(d)) @ v)`` on the normalized input;
            this block adds the (un-normalized) input back as a residual,
            exactly as the former copy did.
            """
            return super(MultiHeadAttention_1, self).call(x) + x

    class SelfAttentionEncoder(tf.keras.layers.Layer):
        """Stack of 8 FFT blocks (self-attention + conv feed-forward) with
        sinusoidal position encoding and a final LayerNormalization.

        Inputs are scaled by sqrt(d_model) before positions are added.
        """
        def __init__(self, d_model, d_in, d_inner, **kwargs):
            super(SelfAttentionEncoder, self).__init__(**kwargs)
            self.scale = tf.math.sqrt(tf.constant([d_model], dtype=tf.float32))
            self.max_len = 800
            self.position_enc = SinusoidalPositionEncoder(self.max_len, d_in)
            self.fft = tf.keras.Sequential()
            # First block projects d_in -> d_model and has no residual;
            # the remaining seven are residual d_model -> d_model blocks.
            self.fft.add(MultiHeadAttention_0(d_in, d_model, 8))
            self.fft.add(PositionwiseConvFeedForward(d_model, d_inner))
            for _ in range(7):
                self.fft.add(MultiHeadAttention_1(d_model, d_model, 8))
                self.fft.add(PositionwiseConvFeedForward(d_model, d_inner))
            self.ln = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        def call(self, input):
            scaled = input * self.scale
            positioned = self.position_enc(scaled)
            return self.ln(self.fft(positioned))

    class TextFftEncoder(tf.keras.layers.Layer):
        """Linguistic text encoder: four embedding tables summed, an 8-block
        self-attention encoder, then a bias-free projection to d_proj.

        NOTE: the default Keras layer name derived from this class name
        ('text_fft_encoder') is looked up by get_layer() when weights are
        injected, and the sublayer creation order below fixes the positional
        set_weights() ordering — keep both stable.
        """
        def __init__(self, **kwargs):
            super(TextFftEncoder, self).__init__(**kwargs)
            self.d_emb = 512
            self.d_model = 128
            self.d_inner = 1024
            self.d_proj = 32
            self.sy_emb = tf.keras.layers.Embedding(147, self.d_emb)
            self.tone_emb = tf.keras.layers.Embedding(10, self.d_emb)
            self.syllable_flag_emb = tf.keras.layers.Embedding(8, self.d_emb)
            self.ws_emb = tf.keras.layers.Embedding(8, self.d_emb)
            self.ling_enc = SelfAttentionEncoder(self.d_model, self.d_emb, self.d_inner)
            self.ling_proj = tf.keras.layers.Dense(self.d_proj, use_bias=False)

        def call(self, inputs_ling):
            # Last axis carries four id streams, in this order:
            # symbol, tone, syllable flag, word-segment flag.
            summed = (self.sy_emb(inputs_ling[:, :, 0])
                      + self.tone_emb(inputs_ling[:, :, 1])
                      + self.syllable_flag_emb(inputs_ling[:, :, 2])
                      + self.ws_emb(inputs_ling[:, :, 3]))
            return self.ling_proj(self.ling_enc(summed))
    
    text_input = tf.keras.Input(shape=(None,4),batch_size=1, dtype=tf.int32,name='text_in')
    enc_out = TextFftEncoder()(text_input)
    enc_model = tf.keras.Model((text_input),(enc_out))

    sy_emb_w = am_model_para["text_encoder.sy_emb.weight"].cpu().numpy()
    tone_emb_w = am_model_para["text_encoder.tone_emb.weight"].cpu().numpy()
    syllable_emb_w = am_model_para["text_encoder.syllable_flag_emb.weight"].cpu().numpy()
    ws_emb_w = am_model_para["text_encoder.ws_emb.weight"].cpu().numpy()
    list_para = [sy_emb_w,tone_emb_w,syllable_emb_w,ws_emb_w]

    fft0_norm_w = am_model_para["text_encoder.ling_enc.fft.0.slf_attn.layer_norm.weight"].cpu().numpy()
    fft0_norm_b = am_model_para["text_encoder.ling_enc.fft.0.slf_attn.layer_norm.bias"].cpu().numpy()
    fft0_qkv_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.0.slf_attn.w_qkv.weight"].cpu().numpy(),(1,0))
    fft0_qkv_b = am_model_para["text_encoder.ling_enc.fft.0.slf_attn.w_qkv.bias"].cpu().numpy()
    fft0_fc_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.0.slf_attn.fc.weight"].cpu().numpy(),(1,0))
    fft0_fc_b = am_model_para["text_encoder.ling_enc.fft.0.slf_attn.fc.bias"].cpu().numpy()
    fft0_pos_w1_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.0.pos_ffn.w_1.weight"].cpu().numpy(),(2,1,0))
    fft0_pos_w1_b = am_model_para["text_encoder.ling_enc.fft.0.pos_ffn.w_1.bias"].cpu().numpy()
    fft0_pos_w2_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.0.pos_ffn.w_2.weight"].cpu().numpy(),(2,1,0))
    fft0_pos_w2_b = am_model_para["text_encoder.ling_enc.fft.0.pos_ffn.w_2.bias"].cpu().numpy()
    fft0_pos_norm_w = am_model_para["text_encoder.ling_enc.fft.0.pos_ffn.layer_norm.weight"].cpu().numpy()
    fft0_pos_norm_b = am_model_para["text_encoder.ling_enc.fft.0.pos_ffn.layer_norm.bias"].cpu().numpy()
    list_para.extend([fft0_norm_w,fft0_norm_b,fft0_qkv_w,fft0_qkv_b,fft0_fc_w,fft0_fc_b,fft0_pos_w1_w,fft0_pos_w1_b,fft0_pos_w2_w,fft0_pos_w2_b,fft0_pos_norm_w,fft0_pos_norm_b])

    fft1_norm_w = am_model_para["text_encoder.ling_enc.fft.1.slf_attn.layer_norm.weight"].cpu().numpy()
    fft1_norm_b = am_model_para["text_encoder.ling_enc.fft.1.slf_attn.layer_norm.bias"].cpu().numpy()
    fft1_qkv_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.1.slf_attn.w_qkv.weight"].cpu().numpy(),(1,0))
    fft1_qkv_b = am_model_para["text_encoder.ling_enc.fft.1.slf_attn.w_qkv.bias"].cpu().numpy()
    fft1_fc_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.1.slf_attn.fc.weight"].cpu().numpy(),(1,0))
    fft1_fc_b = am_model_para["text_encoder.ling_enc.fft.1.slf_attn.fc.bias"].cpu().numpy()
    fft1_pos_w1_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.1.pos_ffn.w_1.weight"].cpu().numpy(),(2,1,0))
    fft1_pos_w1_b = am_model_para["text_encoder.ling_enc.fft.1.pos_ffn.w_1.bias"].cpu().numpy()
    fft1_pos_w2_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.1.pos_ffn.w_2.weight"].cpu().numpy(),(2,1,0))
    fft1_pos_w2_b = am_model_para["text_encoder.ling_enc.fft.1.pos_ffn.w_2.bias"].cpu().numpy()
    fft1_pos_norm_w = am_model_para["text_encoder.ling_enc.fft.1.pos_ffn.layer_norm.weight"].cpu().numpy()
    fft1_pos_norm_b = am_model_para["text_encoder.ling_enc.fft.1.pos_ffn.layer_norm.bias"].cpu().numpy()
    list_para.extend([fft1_norm_w,fft1_norm_b,fft1_qkv_w,fft1_qkv_b,fft1_fc_w,fft1_fc_b,fft1_pos_w1_w,fft1_pos_w1_b,fft1_pos_w2_w,fft1_pos_w2_b,fft1_pos_norm_w,fft1_pos_norm_b])

    fft2_norm_w = am_model_para["text_encoder.ling_enc.fft.2.slf_attn.layer_norm.weight"].cpu().numpy()
    fft2_norm_b = am_model_para["text_encoder.ling_enc.fft.2.slf_attn.layer_norm.bias"].cpu().numpy()
    fft2_qkv_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.2.slf_attn.w_qkv.weight"].cpu().numpy(),(1,0))
    fft2_qkv_b = am_model_para["text_encoder.ling_enc.fft.2.slf_attn.w_qkv.bias"].cpu().numpy()
    fft2_fc_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.2.slf_attn.fc.weight"].cpu().numpy(),(1,0))
    fft2_fc_b = am_model_para["text_encoder.ling_enc.fft.2.slf_attn.fc.bias"].cpu().numpy()
    fft2_pos_w1_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.2.pos_ffn.w_1.weight"].cpu().numpy(),(2,1,0))
    fft2_pos_w1_b = am_model_para["text_encoder.ling_enc.fft.2.pos_ffn.w_1.bias"].cpu().numpy()
    fft2_pos_w2_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.2.pos_ffn.w_2.weight"].cpu().numpy(),(2,1,0))
    fft2_pos_w2_b = am_model_para["text_encoder.ling_enc.fft.2.pos_ffn.w_2.bias"].cpu().numpy()
    fft2_pos_norm_w = am_model_para["text_encoder.ling_enc.fft.2.pos_ffn.layer_norm.weight"].cpu().numpy()
    fft2_pos_norm_b = am_model_para["text_encoder.ling_enc.fft.2.pos_ffn.layer_norm.bias"].cpu().numpy()
    list_para.extend([fft2_norm_w,fft2_norm_b,fft2_qkv_w,fft2_qkv_b,fft2_fc_w,fft2_fc_b,fft2_pos_w1_w,fft2_pos_w1_b,fft2_pos_w2_w,fft2_pos_w2_b,fft2_pos_norm_w,fft2_pos_norm_b])

    fft3_norm_w = am_model_para["text_encoder.ling_enc.fft.3.slf_attn.layer_norm.weight"].cpu().numpy()
    fft3_norm_b = am_model_para["text_encoder.ling_enc.fft.3.slf_attn.layer_norm.bias"].cpu().numpy()
    fft3_qkv_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.3.slf_attn.w_qkv.weight"].cpu().numpy(),(1,0))
    fft3_qkv_b = am_model_para["text_encoder.ling_enc.fft.3.slf_attn.w_qkv.bias"].cpu().numpy()
    fft3_fc_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.3.slf_attn.fc.weight"].cpu().numpy(),(1,0))
    fft3_fc_b = am_model_para["text_encoder.ling_enc.fft.3.slf_attn.fc.bias"].cpu().numpy()
    fft3_pos_w1_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.3.pos_ffn.w_1.weight"].cpu().numpy(),(2,1,0))
    fft3_pos_w1_b = am_model_para["text_encoder.ling_enc.fft.3.pos_ffn.w_1.bias"].cpu().numpy()
    fft3_pos_w2_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.3.pos_ffn.w_2.weight"].cpu().numpy(),(2,1,0))
    fft3_pos_w2_b = am_model_para["text_encoder.ling_enc.fft.3.pos_ffn.w_2.bias"].cpu().numpy()
    fft3_pos_norm_w = am_model_para["text_encoder.ling_enc.fft.3.pos_ffn.layer_norm.weight"].cpu().numpy()
    fft3_pos_norm_b = am_model_para["text_encoder.ling_enc.fft.3.pos_ffn.layer_norm.bias"].cpu().numpy()
    list_para.extend([fft3_norm_w,fft3_norm_b,fft3_qkv_w,fft3_qkv_b,fft3_fc_w,fft3_fc_b,fft3_pos_w1_w,fft3_pos_w1_b,fft3_pos_w2_w,fft3_pos_w2_b,fft3_pos_norm_w,fft3_pos_norm_b])

    fft4_norm_w = am_model_para["text_encoder.ling_enc.fft.4.slf_attn.layer_norm.weight"].cpu().numpy()
    fft4_norm_b = am_model_para["text_encoder.ling_enc.fft.4.slf_attn.layer_norm.bias"].cpu().numpy()
    fft4_qkv_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.4.slf_attn.w_qkv.weight"].cpu().numpy(),(1,0))
    fft4_qkv_b = am_model_para["text_encoder.ling_enc.fft.4.slf_attn.w_qkv.bias"].cpu().numpy()
    fft4_fc_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.4.slf_attn.fc.weight"].cpu().numpy(),(1,0))
    fft4_fc_b = am_model_para["text_encoder.ling_enc.fft.4.slf_attn.fc.bias"].cpu().numpy()
    fft4_pos_w1_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.4.pos_ffn.w_1.weight"].cpu().numpy(),(2,1,0))
    fft4_pos_w1_b = am_model_para["text_encoder.ling_enc.fft.4.pos_ffn.w_1.bias"].cpu().numpy()
    fft4_pos_w2_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.4.pos_ffn.w_2.weight"].cpu().numpy(),(2,1,0))
    fft4_pos_w2_b = am_model_para["text_encoder.ling_enc.fft.4.pos_ffn.w_2.bias"].cpu().numpy()
    fft4_pos_norm_w = am_model_para["text_encoder.ling_enc.fft.4.pos_ffn.layer_norm.weight"].cpu().numpy()
    fft4_pos_norm_b = am_model_para["text_encoder.ling_enc.fft.4.pos_ffn.layer_norm.bias"].cpu().numpy()
    list_para.extend([fft4_norm_w,fft4_norm_b,fft4_qkv_w,fft4_qkv_b,fft4_fc_w,fft4_fc_b,fft4_pos_w1_w,fft4_pos_w1_b,fft4_pos_w2_w,fft4_pos_w2_b,fft4_pos_norm_w,fft4_pos_norm_b])

    fft5_norm_w = am_model_para["text_encoder.ling_enc.fft.5.slf_attn.layer_norm.weight"].cpu().numpy()
    fft5_norm_b = am_model_para["text_encoder.ling_enc.fft.5.slf_attn.layer_norm.bias"].cpu().numpy()
    fft5_qkv_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.5.slf_attn.w_qkv.weight"].cpu().numpy(),(1,0))
    fft5_qkv_b = am_model_para["text_encoder.ling_enc.fft.5.slf_attn.w_qkv.bias"].cpu().numpy()
    fft5_fc_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.5.slf_attn.fc.weight"].cpu().numpy(),(1,0))
    fft5_fc_b = am_model_para["text_encoder.ling_enc.fft.5.slf_attn.fc.bias"].cpu().numpy()
    fft5_pos_w1_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.5.pos_ffn.w_1.weight"].cpu().numpy(),(2,1,0))
    fft5_pos_w1_b = am_model_para["text_encoder.ling_enc.fft.5.pos_ffn.w_1.bias"].cpu().numpy()
    fft5_pos_w2_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.5.pos_ffn.w_2.weight"].cpu().numpy(),(2,1,0))
    fft5_pos_w2_b = am_model_para["text_encoder.ling_enc.fft.5.pos_ffn.w_2.bias"].cpu().numpy()
    fft5_pos_norm_w = am_model_para["text_encoder.ling_enc.fft.5.pos_ffn.layer_norm.weight"].cpu().numpy()
    fft5_pos_norm_b = am_model_para["text_encoder.ling_enc.fft.5.pos_ffn.layer_norm.bias"].cpu().numpy()
    list_para.extend([fft5_norm_w,fft5_norm_b,fft5_qkv_w,fft5_qkv_b,fft5_fc_w,fft5_fc_b,fft5_pos_w1_w,fft5_pos_w1_b,fft5_pos_w2_w,fft5_pos_w2_b,fft5_pos_norm_w,fft5_pos_norm_b])

    fft6_norm_w = am_model_para["text_encoder.ling_enc.fft.6.slf_attn.layer_norm.weight"].cpu().numpy()
    fft6_norm_b = am_model_para["text_encoder.ling_enc.fft.6.slf_attn.layer_norm.bias"].cpu().numpy()
    fft6_qkv_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.6.slf_attn.w_qkv.weight"].cpu().numpy(),(1,0))
    fft6_qkv_b = am_model_para["text_encoder.ling_enc.fft.6.slf_attn.w_qkv.bias"].cpu().numpy()
    fft6_fc_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.6.slf_attn.fc.weight"].cpu().numpy(),(1,0))
    fft6_fc_b = am_model_para["text_encoder.ling_enc.fft.6.slf_attn.fc.bias"].cpu().numpy()
    fft6_pos_w1_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.6.pos_ffn.w_1.weight"].cpu().numpy(),(2,1,0))
    fft6_pos_w1_b = am_model_para["text_encoder.ling_enc.fft.6.pos_ffn.w_1.bias"].cpu().numpy()
    fft6_pos_w2_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.6.pos_ffn.w_2.weight"].cpu().numpy(),(2,1,0))
    fft6_pos_w2_b = am_model_para["text_encoder.ling_enc.fft.6.pos_ffn.w_2.bias"].cpu().numpy()
    fft6_pos_norm_w = am_model_para["text_encoder.ling_enc.fft.6.pos_ffn.layer_norm.weight"].cpu().numpy()
    fft6_pos_norm_b = am_model_para["text_encoder.ling_enc.fft.6.pos_ffn.layer_norm.bias"].cpu().numpy()
    list_para.extend([fft6_norm_w,fft6_norm_b,fft6_qkv_w,fft6_qkv_b,fft6_fc_w,fft6_fc_b,fft6_pos_w1_w,fft6_pos_w1_b,fft6_pos_w2_w,fft6_pos_w2_b,fft6_pos_norm_w,fft6_pos_norm_b])

    fft7_norm_w = am_model_para["text_encoder.ling_enc.fft.7.slf_attn.layer_norm.weight"].cpu().numpy()
    fft7_norm_b = am_model_para["text_encoder.ling_enc.fft.7.slf_attn.layer_norm.bias"].cpu().numpy()
    fft7_qkv_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.7.slf_attn.w_qkv.weight"].cpu().numpy(),(1,0))
    fft7_qkv_b = am_model_para["text_encoder.ling_enc.fft.7.slf_attn.w_qkv.bias"].cpu().numpy()
    fft7_fc_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.7.slf_attn.fc.weight"].cpu().numpy(),(1,0))
    fft7_fc_b = am_model_para["text_encoder.ling_enc.fft.7.slf_attn.fc.bias"].cpu().numpy()
    fft7_pos_w1_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.7.pos_ffn.w_1.weight"].cpu().numpy(),(2,1,0))
    fft7_pos_w1_b = am_model_para["text_encoder.ling_enc.fft.7.pos_ffn.w_1.bias"].cpu().numpy()
    fft7_pos_w2_w = np.transpose(am_model_para["text_encoder.ling_enc.fft.7.pos_ffn.w_2.weight"].cpu().numpy(),(2,1,0))
    fft7_pos_w2_b = am_model_para["text_encoder.ling_enc.fft.7.pos_ffn.w_2.bias"].cpu().numpy()
    fft7_pos_norm_w = am_model_para["text_encoder.ling_enc.fft.7.pos_ffn.layer_norm.weight"].cpu().numpy()
    fft7_pos_norm_b = am_model_para["text_encoder.ling_enc.fft.7.pos_ffn.layer_norm.bias"].cpu().numpy()
    list_para.extend([fft7_norm_w,fft7_norm_b,fft7_qkv_w,fft7_qkv_b,fft7_fc_w,fft7_fc_b,fft7_pos_w1_w,fft7_pos_w1_b,fft7_pos_w2_w,fft7_pos_w2_b,fft7_pos_norm_w,fft7_pos_norm_b])

    ln_norm_w = am_model_para["text_encoder.ling_enc.ln.weight"].cpu().numpy()
    ln_norm_b = am_model_para["text_encoder.ling_enc.ln.bias"].cpu().numpy()
    list_para.extend([ln_norm_w,ln_norm_b])

    ling_proj_w = np.transpose(am_model_para["text_encoder.ling_proj.weight"].cpu().numpy(),(1,0))
    list_para.extend([ling_proj_w])
    enc_model.get_layer(name='text_fft_encoder').set_weights(list_para)
    tflite_path =  os.path.join(dst_tflite_path,'txtenc.tflite')
    save_tflite_model(enc_model,tflite_path)
    return

def tf_variance_adaptor(am_model_para, dst_tflite_path):
    """Export the variance-adaptor sub-networks to TFLite.

    The first step below converts the 36x32 emotion-token embedding table to
    ``emot.tflite``; the remaining sub-modules are defined and exported
    further down in this function.
    """
    # Stand-alone model wrapping a single embedding lookup.
    embed_input = tf.keras.Input(name='emot_in',type_spec = tf.TensorSpec(shape=[None, None], dtype=tf.int32))
    embed_out = tf.keras.layers.Embedding(36,32,name='emot_emd')(embed_input)
    emot_model = tf.keras.Model((embed_input),(embed_out))
    # Copy the PyTorch embedding table into the Keras layer by name.
    embed_weight = am_model_para["emo_tokenizer.weight"].cpu().numpy()
    emot_model.get_layer(name='emot_emd').set_weights([embed_weight])
    tflite_path =  os.path.join(dst_tflite_path,'emot.tflite')
    save_tflite_model(emot_model,tflite_path)

    class FeedForwardNet(tf.keras.layers.Layer):
        """Two-layer conv feed-forward net: relu(w_1(x)) followed by a
        bias-free w_2 projection. Default kernel sizes are pointwise."""
        def __init__(self, d_hid, d_out, kernel_size=(1, 1), **kwargs):
            super(FeedForwardNet, self).__init__(**kwargs)
            self.w_1 = tf.keras.layers.Conv1D(
                d_hid, kernel_size[0], strides=1, padding='same',
                activation='relu', use_bias=True)
            self.w_2 = tf.keras.layers.Conv1D(
                d_out, kernel_size[1], strides=1, padding='same',
                use_bias=False)

        def call(self, x):
            return self.w_2(self.w_1(x))

    class MemoryBlockV2(tf.keras.layers.Layer):
        """FSMN memory block: depthwise Conv1D over a manually padded input,
        plus an identity residual. Padding keeps the sequence length fixed."""
        def __init__(self, d_model, filter_size, **kwargs):
            super(MemoryBlockV2, self).__init__(**kwargs)
            self.conv_dw = tf.keras.layers.Conv1D(
                d_model, filter_size, strides=1, padding='valid',
                groups=d_model, use_bias=False)
            # Split the (filter_size - 1) total pad across both sides so the
            # 'valid' convolution preserves the time dimension.
            left = (filter_size - 1) // 2
            right = filter_size - 1 - left
            self.pad = tf.constant([[0, 0], [left, right], [0, 0]])

        def call(self, input):
            return self.conv_dw(tf.pad(input, self.pad)) + input

    class FsmnEncoderV2(tf.keras.layers.Layer):
        """Three stacked FFN + memory-block FSMN layers.

        The first layer has no skip connection; layers two and three each add
        the previous layer's memory output as a residual.
        """
        def __init__(self, filter_size, num_memory_units, ffn_inner_dim, **kwargs):
            super(FsmnEncoderV2, self).__init__(**kwargs)
            # Attribute creation order fixes the positional weight ordering.
            self.ffn_0 = FeedForwardNet(ffn_inner_dim, num_memory_units)
            self.ffn_1 = FeedForwardNet(ffn_inner_dim, num_memory_units)
            self.ffn_2 = FeedForwardNet(ffn_inner_dim, num_memory_units)

            self.mem_0 = MemoryBlockV2(num_memory_units, filter_size)
            self.mem_1 = MemoryBlockV2(num_memory_units, filter_size)
            self.mem_2 = MemoryBlockV2(num_memory_units, filter_size)

        def call(self, input):
            memory = self.mem_0(self.ffn_0(input))
            for ffn, mem in ((self.ffn_1, self.mem_1), (self.ffn_2, self.mem_2)):
                memory = mem(ffn(memory)) + memory
            return memory

    class VarFsmnRnnNARPredictor(tf.keras.layers.Layer):
        """Non-autoregressive variance predictor: FSMN encoder -> BiLSTM -> Dense(1).

        Fix: ``lstm_unit`` was previously accepted but ignored — the BiLSTM
        width was hard-coded to 128.  It is now honored.  The visible config
        (VarianceAdaptor1 sets lstm_unit = 128) suggests all callers pass
        128, so behavior should be unchanged — confirm against the call
        sites below/elsewhere.
        """
        def __init__(self, filter_size, num_memory_units, ffn_inner_dim, lstm_unit, **kwargs):
            super(VarFsmnRnnNARPredictor, self).__init__(**kwargs)
            self.fsmn = FsmnEncoderV2(filter_size, num_memory_units, ffn_inner_dim)
            self.blstm = tf.keras.layers.Bidirectional(
                tf.keras.layers.LSTM(lstm_unit, return_sequences=True))
            self.fc = tf.keras.layers.Dense(1)

        def call(self, inputs):
            # Output keeps its trailing singleton channel; the squeeze is
            # deliberately left to the caller (see original commented code).
            return self.fc(self.blstm(self.fsmn(inputs)))

    class Prenet(tf.keras.layers.Layer):
        """Two stacked ReLU Dense layers of equal width."""
        def __init__(self, prenet_units, **kwargs):
            super(Prenet, self).__init__(**kwargs)
            self.fcs = tf.keras.Sequential()
            for _ in range(2):
                self.fcs.add(tf.keras.layers.Dense(prenet_units, activation='relu'))

        def call(self, x):
            return self.fcs(x)

    class VarRnnARPredictor(tf.keras.layers.Layer):
        """Autoregressive variance predictor: prenet + two LSTMs + ReLU Dense.

        ``call`` threads explicit LSTM (h, c) states in and out so the caller
        can drive the recurrence step by step.
        """
        def __init__(self, prenet_units, rnn_units, **kwargs):
            super(VarRnnARPredictor, self).__init__(**kwargs)
            self.prenet = Prenet(prenet_units, name='prenet')
            self.lstm0 = tf.keras.layers.LSTM(rnn_units, return_sequences=True, return_state=True)
            self.lstm1 = tf.keras.layers.LSTM(rnn_units, return_sequences=True, return_state=True)
            self.fc = tf.keras.layers.Dense(1, activation='relu')

        def call(self, x, cond, p_h0, p_c0, p_h1, p_c1):
            stacked = tf.concat([self.prenet(x), cond], axis=-1)
            out0, h0, c0 = self.lstm0(stacked, initial_state=(p_h0, p_c0))
            out1, h1, c1 = self.lstm1(out0, initial_state=(p_h1, p_c1))
            pred = self.fc(out1)
            # Also returns exp(pred)-1 — presumably undoing a log1p applied
            # to the training targets; verify against the consumer.
            return pred, h0, c0, h1, c1, tf.math.exp(pred) - 1

    class  DurSinusoidalPositionEncoder(tf.keras.layers.Layer):
        """Frame-level sinusoidal position encoding driven by token durations.

        Expands per-token durations into per-frame positions (each token's
        frames are numbered 1..dur), pads the frame axis to a multiple of
        ``outputs_per_step``, and adds the resulting sinusoidal embedding to
        the length-regulated text features ``lr_txt``.  Unlike
        SinusoidalPositionEncoder above, sin/cos channels here end up
        interleaved (via the final reshape), not concatenated in halves.
        """
        def __init__(self, depth, outputs_per_step, **kwargs):
            super(DurSinusoidalPositionEncoder, self).__init__(**kwargs)
            self.depth = depth
            self.outputs_per_step = outputs_per_step
            #inv_timescales = [np.power(10000, 2 * (hid_idx // 2) / depth) for hid_idx in range(depth)]
            #self.inv_timescales = tf.convert_to_tensor(inv_timescales, dtype=tf.float32)
            # Inverse frequencies for the depth/2 sin/cos channel pairs.
            div_term = tf.math.exp(tf.range(0, self.depth, 2, dtype=tf.float32)*-1*(tf.math.log(10000.0)/self.depth))
            self.inv_timescales = tf.expand_dims(div_term,axis=0)

        def call(self, durations, lr_txt):
            # NOTE(review): the hard-coded dur_pad shape [1, padding_len]
            # below assumes batch size 1 — confirm before batching.
            durations = tf.squeeze(durations,axis=-1)
            # +0.5 then int cast == round-to-nearest frame count per token.
            reps_f = durations + 0.5
            reps = tf.cast(reps_f, dtype=tf.int32)
            output_lens = tf.math.reduce_sum(reps,axis=1)
            max_len = tf.math.reduce_max(output_lens)
            # Prepend a zero so cumsum gives each token's starting offset.
            paddings = tf.constant([[0, 0,], [1, 0]])
            reps_pad = tf.pad(reps,paddings)
            reps_cumsum = tf.expand_dims(tf.math.cumsum(reps_pad,axis=1),axis=1)
            range_ = tf.expand_dims(tf.expand_dims(tf.range(max_len),axis=-1),axis=0)
            # mult[b, f, t] == 1 iff frame f falls inside token t's span.
            mult = (reps_cumsum[:, :, :-1] <= range_) & (reps_cumsum[:, :, 1:] > range_)
            mult = tf.cast(mult, dtype=tf.float32)
            reps_cumsum = tf.cast(reps_cumsum, dtype=tf.float32)

            # For every frame: the cumulative frame count before its token.
            offsets = tf.squeeze(tf.matmul(mult, tf.expand_dims(reps_cumsum[:, 0, :-1],axis=-1)),axis=-1)
            range_f = tf.cast(range_, dtype=tf.float32)
            # 1-based position of each frame within its own token.
            dur_pos = range_f[:, :, 0] - offsets + 1
            seq_len = tf.shape(dur_pos)[1]
            # Zero-pad the frame axis up to a multiple of outputs_per_step.
            padding_len = (self.outputs_per_step - (seq_len % self.outputs_per_step)) % self.outputs_per_step
            dur_pad = tf.zeros([1,padding_len],dtype=tf.float32)
            dur_pos = tf.concat([dur_pos,dur_pad],axis=-1)
            dur_pos = tf.expand_dims(dur_pos,axis=-1)
            #return dur_pos,self.inv_timescales

            # (1, frames, depth/2) angles -> stacked sin/cos -> reshape
            # interleaves them into (frames, depth).
            pe_pos = tf.math.multiply(dur_pos,self.inv_timescales)
            pe_even = tf.expand_dims(tf.math.sin(pe_pos),-1)
            pe_odd = tf.expand_dims(tf.math.cos(pe_pos),-1)
            pe = tf.concat([pe_even,pe_odd],axis=-1)
            position_embedding = tf.reshape(pe,(-1,self.depth))
            return lr_txt + tf.expand_dims(position_embedding,axis=0)

    class  VarianceAdaptor1(tf.keras.layers.Layer):
        """First stage of the variance adaptor.

        Predicts pitch and energy from the concatenated
        text/speaker/emotion embeddings, embeds both predictions with Conv1D
        layers and adds them onto the text embedding. Also returns the
        conditioning tensor consumed by the separate duration-predictor model.
        """
        def __init__(self, **kwargs):
            super(VarianceAdaptor1, self).__init__(**kwargs)
            # Hyper-parameters fixed to match the exported torch checkpoint.
            self.filter_size = 41
            self.num_memory_units = 128
            self.ffn_inner_dim = 256
            self.lstm_unit = 128
            self.dur_pred_prenet_units = 128
            self.dur_pred_lstm_units = 128
            self.encoder_projection_units = 32
            self.outputs_per_step = 3

            # Layer-creation order matters: the set_weights() call on this layer
            # relies on it (pitch predictor, energy predictor, pitch_emb, energy_emb).
            # NOTE(review): VarFsmnRnnNARPredictor is defined elsewhere in this file.
            self.pitch_predictor = VarFsmnRnnNARPredictor(self.filter_size,self.num_memory_units,self.ffn_inner_dim,self.lstm_unit)
            self.energy_predictor = VarFsmnRnnNARPredictor(self.filter_size,self.num_memory_units,self.ffn_inner_dim,self.lstm_unit)
            self.pitch_emb = tf.keras.layers.Conv1D(32,9,strides=1,padding='same',use_bias=True)
            self.energy_emb = tf.keras.layers.Conv1D(32,9,strides=1,padding='same',use_bias=True)

        def call(self, inputs_text_embedding,inputs_emo_embedding,inputs_spk_embedding):
            # NOTE(review): the disabled tf.repeat here suggests the speaker
            # embedding arrives already tiled to the text length — confirm caller.
            variance_predictor_inputs = tf.concat([inputs_text_embedding,inputs_spk_embedding,inputs_emo_embedding],axis=-1)
            pitch_predictions = self.pitch_predictor(variance_predictor_inputs)
            energy_predictions = self.energy_predictor(variance_predictor_inputs)
            pitch_embeddings = self.pitch_emb(pitch_predictions)
            energy_embeddings = self.energy_emb(energy_predictions)
            # Inject the predicted variances back into the text embedding.
            inputs_text_embedding_aug = (inputs_text_embedding + pitch_embeddings + energy_embeddings)
            duration_predictor_cond = tf.concat([inputs_text_embedding_aug,inputs_spk_embedding,inputs_emo_embedding],axis=-1)
            return inputs_text_embedding_aug,duration_predictor_cond

    # ---- Build the variance-adaptor Keras model and load the torch weights ----
    text_input = tf.keras.Input(shape=(None,32),batch_size=1, name='txt_in')
    emo_input = tf.keras.Input(shape=(None,32),batch_size=1, name='emo_in')
    spk_input = tf.keras.Input(shape=(None,192),batch_size=1, name='spk_in')
    inputs_emb,input_pred_con = VarianceAdaptor1()(text_input,emo_input,spk_input)
    varadp_model1 = tf.keras.Model((text_input,emo_input,spk_input),(inputs_emb,input_pred_con))

    # The order of this list must match the Keras weight order of the
    # VarianceAdaptor1 layer exactly (creation order of its sub-layers).
    varadp1_list_para = []
    # torch Conv1d kernels are (out, in, k); TF Conv1D expects (k, in, out),
    # hence the (2,1,0) transpose. torch Linear (out, in) -> TF Dense (in, out).
    pitch_pred0_w1_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fsmn.ffn_lst.0.w_1.weight"].cpu().numpy(),(2,1,0))
    pitch_pred0_w1_b = am_model_para["variance_adaptor.pitch_predictor.fsmn.ffn_lst.0.w_1.bias"].cpu().numpy()
    pitch_pred0_w2_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fsmn.ffn_lst.0.w_2.weight"].cpu().numpy(),(2,1,0))
    varadp1_list_para.extend([pitch_pred0_w1_w,pitch_pred0_w1_b,pitch_pred0_w2_w])

    pitch_pred1_w1_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fsmn.ffn_lst.1.w_1.weight"].cpu().numpy(),(2,1,0))
    pitch_pred1_w1_b = am_model_para["variance_adaptor.pitch_predictor.fsmn.ffn_lst.1.w_1.bias"].cpu().numpy()
    pitch_pred1_w2_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fsmn.ffn_lst.1.w_2.weight"].cpu().numpy(),(2,1,0))
    varadp1_list_para.extend([pitch_pred1_w1_w,pitch_pred1_w1_b,pitch_pred1_w2_w])

    pitch_pred2_w1_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fsmn.ffn_lst.2.w_1.weight"].cpu().numpy(),(2,1,0))
    pitch_pred2_w1_b = am_model_para["variance_adaptor.pitch_predictor.fsmn.ffn_lst.2.w_1.bias"].cpu().numpy()
    pitch_pred2_w2_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fsmn.ffn_lst.2.w_2.weight"].cpu().numpy(),(2,1,0))
    varadp1_list_para.extend([pitch_pred2_w1_w,pitch_pred2_w1_b,pitch_pred2_w2_w])

    # FSMN memory-block depthwise conv kernels.
    pitch_pred0_mem_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fsmn.memory_block_lst.0.conv_dw.weight"].cpu().numpy(),(2,1,0))
    pitch_pred1_mem_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fsmn.memory_block_lst.1.conv_dw.weight"].cpu().numpy(),(2,1,0))
    pitch_pred2_mem_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fsmn.memory_block_lst.2.conv_dw.weight"].cpu().numpy(),(2,1,0))
    varadp1_list_para.extend([pitch_pred0_mem_w,pitch_pred1_mem_w,pitch_pred2_mem_w])

    # Bidirectional LSTM: Keras keeps a single bias per direction, so the torch
    # ih and hh biases are summed. Forward then reverse direction.
    pitch_pred_lstm_ih_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.blstm.weight_ih_l0"].cpu().numpy(),(1,0))
    pitch_pred_lstm_hh_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.blstm.weight_hh_l0"].cpu().numpy(),(1,0))
    pitch_pred_lstm_ih_b = am_model_para["variance_adaptor.pitch_predictor.blstm.bias_ih_l0"].cpu().numpy()
    pitch_pred_lstm_hh_b = am_model_para["variance_adaptor.pitch_predictor.blstm.bias_hh_l0"].cpu().numpy()
    pitch_pred_lstmr_ih_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.blstm.weight_ih_l0_reverse"].cpu().numpy(),(1,0))
    pitch_pred_lstmr_hh_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.blstm.weight_hh_l0_reverse"].cpu().numpy(),(1,0))
    pitch_pred_lstmr_ih_b = am_model_para["variance_adaptor.pitch_predictor.blstm.bias_ih_l0_reverse"].cpu().numpy()
    pitch_pred_lstmr_hh_b = am_model_para["variance_adaptor.pitch_predictor.blstm.bias_hh_l0_reverse"].cpu().numpy()
    varadp1_list_para.extend([pitch_pred_lstm_ih_w,pitch_pred_lstm_hh_w,pitch_pred_lstm_ih_b+pitch_pred_lstm_hh_b,pitch_pred_lstmr_ih_w,pitch_pred_lstmr_hh_w,pitch_pred_lstmr_ih_b+pitch_pred_lstmr_hh_b])

    pitch_pred_fc_w = np.transpose(am_model_para["variance_adaptor.pitch_predictor.fc.weight"].cpu().numpy(),(1,0))
    pitch_pred_fc_b = am_model_para["variance_adaptor.pitch_predictor.fc.bias"].cpu().numpy()
    varadp1_list_para.extend([pitch_pred_fc_w,pitch_pred_fc_b])

    # ---- Energy predictor: same layout as the pitch predictor above ----
    energy_pred0_w1_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fsmn.ffn_lst.0.w_1.weight"].cpu().numpy(),(2,1,0))
    energy_pred0_w1_b = am_model_para["variance_adaptor.energy_predictor.fsmn.ffn_lst.0.w_1.bias"].cpu().numpy()
    energy_pred0_w2_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fsmn.ffn_lst.0.w_2.weight"].cpu().numpy(),(2,1,0))
    varadp1_list_para.extend([energy_pred0_w1_w,energy_pred0_w1_b,energy_pred0_w2_w])

    energy_pred1_w1_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fsmn.ffn_lst.1.w_1.weight"].cpu().numpy(),(2,1,0))
    energy_pred1_w1_b = am_model_para["variance_adaptor.energy_predictor.fsmn.ffn_lst.1.w_1.bias"].cpu().numpy()
    energy_pred1_w2_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fsmn.ffn_lst.1.w_2.weight"].cpu().numpy(),(2,1,0))
    varadp1_list_para.extend([energy_pred1_w1_w,energy_pred1_w1_b,energy_pred1_w2_w])

    energy_pred2_w1_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fsmn.ffn_lst.2.w_1.weight"].cpu().numpy(),(2,1,0))
    energy_pred2_w1_b = am_model_para["variance_adaptor.energy_predictor.fsmn.ffn_lst.2.w_1.bias"].cpu().numpy()
    energy_pred2_w2_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fsmn.ffn_lst.2.w_2.weight"].cpu().numpy(),(2,1,0))
    varadp1_list_para.extend([energy_pred2_w1_w,energy_pred2_w1_b,energy_pred2_w2_w])

    energy_pred0_mem_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fsmn.memory_block_lst.0.conv_dw.weight"].cpu().numpy(),(2,1,0))
    energy_pred1_mem_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fsmn.memory_block_lst.1.conv_dw.weight"].cpu().numpy(),(2,1,0))
    energy_pred2_mem_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fsmn.memory_block_lst.2.conv_dw.weight"].cpu().numpy(),(2,1,0))
    varadp1_list_para.extend([energy_pred0_mem_w,energy_pred1_mem_w,energy_pred2_mem_w])

    energy_pred_lstm_ih_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.blstm.weight_ih_l0"].cpu().numpy(),(1,0))
    energy_pred_lstm_hh_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.blstm.weight_hh_l0"].cpu().numpy(),(1,0))
    energy_pred_lstm_ih_b = am_model_para["variance_adaptor.energy_predictor.blstm.bias_ih_l0"].cpu().numpy()
    energy_pred_lstm_hh_b = am_model_para["variance_adaptor.energy_predictor.blstm.bias_hh_l0"].cpu().numpy()
    energy_pred_lstmr_ih_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.blstm.weight_ih_l0_reverse"].cpu().numpy(),(1,0))
    energy_pred_lstmr_hh_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.blstm.weight_hh_l0_reverse"].cpu().numpy(),(1,0))
    energy_pred_lstmr_ih_b = am_model_para["variance_adaptor.energy_predictor.blstm.bias_ih_l0_reverse"].cpu().numpy()
    energy_pred_lstmr_hh_b = am_model_para["variance_adaptor.energy_predictor.blstm.bias_hh_l0_reverse"].cpu().numpy()
    varadp1_list_para.extend([energy_pred_lstm_ih_w,energy_pred_lstm_hh_w,energy_pred_lstm_ih_b+energy_pred_lstm_hh_b,energy_pred_lstmr_ih_w,energy_pred_lstmr_hh_w,energy_pred_lstmr_ih_b+energy_pred_lstmr_hh_b])

    energy_pred_fc_w = np.transpose(am_model_para["variance_adaptor.energy_predictor.fc.weight"].cpu().numpy(),(1,0))
    energy_pred_fc_b = am_model_para["variance_adaptor.energy_predictor.fc.bias"].cpu().numpy()
    varadp1_list_para.extend([energy_pred_fc_w,energy_pred_fc_b])

    # Pitch/energy embedding convolutions, then load everything in one shot.
    pitch_emb_w = np.transpose(am_model_para["variance_adaptor.pitch_emb.weight"].cpu().numpy(),(2,1,0))
    pitch_emb_b = am_model_para["variance_adaptor.pitch_emb.bias"].cpu().numpy()
    energy_emb_w = np.transpose(am_model_para["variance_adaptor.energy_emb.weight"].cpu().numpy(),(2,1,0))
    energy_emb_b = am_model_para["variance_adaptor.energy_emb.bias"].cpu().numpy()
    varadp1_list_para.extend([pitch_emb_w,pitch_emb_b,energy_emb_w,energy_emb_b])

    varadp_model1.get_layer(name='variance_adaptor1').set_weights(varadp1_list_para)

    tflite_path =  os.path.join(dst_tflite_path,'varadp.tflite')
    save_tflite_model(varadp_model1,tflite_path)


    # ---- Single-step autoregressive duration-predictor model ----
    # LSTM states are explicit model inputs/outputs so the exported tflite model
    # can be driven one frame at a time by the caller.
    pred_txt_in = tf.keras.Input(shape=(1,1),batch_size=1, name='txt_in')
    pred_cond = tf.keras.Input(shape=(1,256),batch_size=1, name='cond')
    p_h0 = tf.keras.Input(shape=(128,),batch_size=1, name='p_h0')
    p_c0 = tf.keras.Input(shape=(128,),batch_size=1, name='p_c0')
    p_h1 = tf.keras.Input(shape=(128,),batch_size=1, name='p_h1')
    p_c1 = tf.keras.Input(shape=(128,),batch_size=1, name='p_c1')
    duration_out,h0,c0,h1,c1,log_dur = VarRnnARPredictor(128,128)(pred_txt_in,pred_cond,p_h0,p_c0,p_h1,p_c1)
    duration_model = tf.keras.Model((pred_txt_in,pred_cond,p_h0,p_c0,p_h1,p_c1),(duration_out,h0,c0,h1,c1,log_dur))
    duration_model_layer = duration_model.get_layer(name='var_rnn_ar_predictor')
    duration_list_para = []
    # Prenet: torch Sequential indices 0 and 3 — presumably activation/dropout
    # modules sit in between (TODO confirm against the torch model definition).
    # torch Linear (out, in) -> TF Dense (in, out).
    duration_pred_0_w = np.transpose(am_model_para["variance_adaptor.duration_predictor.prenet.fcs.0.weight"].cpu().numpy(),(1,0))
    duration_pred_0_b = am_model_para["variance_adaptor.duration_predictor.prenet.fcs.0.bias"].cpu().numpy()
    duration_pred_1_w = np.transpose(am_model_para["variance_adaptor.duration_predictor.prenet.fcs.3.weight"].cpu().numpy(),(1,0))
    duration_pred_1_b = am_model_para["variance_adaptor.duration_predictor.prenet.fcs.3.bias"].cpu().numpy()
    duration_list_para.extend([duration_pred_0_w,duration_pred_0_b,duration_pred_1_w,duration_pred_1_b])

    # torch stacked-LSTM layer 0 -> Keras lstm0; ih/hh biases are summed because
    # a Keras LSTM keeps a single bias vector.
    duration_pred_lstm_0_ih_w = np.transpose(am_model_para["variance_adaptor.duration_predictor.lstm.weight_ih_l0"].cpu().numpy(),(1,0))
    duration_pred_lstm_0_hh_w = np.transpose(am_model_para["variance_adaptor.duration_predictor.lstm.weight_hh_l0"].cpu().numpy(),(1,0))
    duration_pred_lstm_0_ih_b = am_model_para["variance_adaptor.duration_predictor.lstm.bias_ih_l0"].cpu().numpy()
    duration_pred_lstm_0_hh_b = am_model_para["variance_adaptor.duration_predictor.lstm.bias_hh_l0"].cpu().numpy()
    duration_list_para.extend([duration_pred_lstm_0_ih_w,duration_pred_lstm_0_hh_w,duration_pred_lstm_0_ih_b+duration_pred_lstm_0_hh_b])

    # torch stacked-LSTM layer 1 -> Keras lstm1.
    duration_pred_lstm_1_ih_w = np.transpose(am_model_para["variance_adaptor.duration_predictor.lstm.weight_ih_l1"].cpu().numpy(),(1,0))
    duration_pred_lstm_1_hh_w = np.transpose(am_model_para["variance_adaptor.duration_predictor.lstm.weight_hh_l1"].cpu().numpy(),(1,0))
    duration_pred_lstm_1_ih_b = am_model_para["variance_adaptor.duration_predictor.lstm.bias_ih_l1"].cpu().numpy()
    duration_pred_lstm_1_hh_b = am_model_para["variance_adaptor.duration_predictor.lstm.bias_hh_l1"].cpu().numpy()
    duration_list_para.extend([duration_pred_lstm_1_ih_w,duration_pred_lstm_1_hh_w,duration_pred_lstm_1_ih_b+duration_pred_lstm_1_hh_b])

    # Final projection to the scalar duration.
    duration_pred_fc_w = np.transpose(am_model_para["variance_adaptor.duration_predictor.fc.weight"].cpu().numpy(),(1,0))
    duration_pred_fc_b = am_model_para["variance_adaptor.duration_predictor.fc.bias"].cpu().numpy()
    duration_list_para.extend([duration_pred_fc_w,duration_pred_fc_b])
    duration_model_layer.set_weights(duration_list_para)

    tflite_path =  os.path.join(dst_tflite_path,'duration.tflite')
    save_tflite_model(duration_model,tflite_path)

    return

def tf_mel_dec(am_model_para, dst_tflite_path):

    class Prenet3(tf.keras.layers.Layer):
        """Three-layer prenet: two ReLU Dense layers followed by a linear projection."""

        def __init__(self, prenet_units, out_units, **kwargs):
            super(Prenet3, self).__init__(**kwargs)
            # Same layer order the torch checkpoint export expects for set_weights().
            self.fcs = tf.keras.Sequential([
                tf.keras.layers.Dense(prenet_units, activation='relu'),
                tf.keras.layers.Dense(prenet_units, activation='relu'),
                tf.keras.layers.Dense(out_units, name='dense_2'),
            ])

        def call(self, x):
            return self.fcs(x)

    ### multihead attention
    class MultiHeadPNCAAttention(tf.keras.layers.Layer):
        def __init__(self, d_model, num_heads,**kwargs):
            super(MultiHeadPNCAAttention, self).__init__(**kwargs)
            self.num_heads = num_heads
            self.d_model = d_model
            self.depth = d_model//self.num_heads
            self.norm = tf.keras.layers.LayerNormalization(epsilon=1e-6)
            self.w_x_qkv = tf.keras.layers.Dense(d_model*3)
            self.fc_x = tf.keras.layers.Dense(d_model)
            self.fc_h = tf.keras.layers.Dense(d_model)

        def forward_x(self, x,batch_size):
            q_k_v = self.w_x_qkv(x)
            q,k,v = tf.split(q_k_v,num_or_size_splits=3,axis=-1)

            q_h = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
            k_h = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
            v_h = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)
            return q_h, k_h, v_h

        def split_heads(self, x, batch_size):
            """分拆最后一个维度到 (num_heads, depth).
               转置结果使得形状为 (batch_size, num_heads, seq_len, depth)
            """
            x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
            return tf.transpose(x, perm=[0, 2, 1, 3])

        def scaled_dot_product_attention(self,q, k, v,mask):
            """计算注意力权重。
            q, k, v 必须具有匹配的前置维度。
            k, v 必须有匹配的倒数第二个维度，例如：seq_len_k = seq_len_v。
            虽然 mask 根据其类型（填充或前瞻）有不同的形状，
            但是 mask 必须能进行广播转换以便求和。

            参数:
                q: 请求的形状 == (..., seq_len_q, depth)
                k: 主键的形状 == (..., seq_len_k, depth)
                v: 数值的形状 == (..., seq_len_v, depth_v)
                mask: Float 张量，其形状能转换成
               (..., seq_len_q, seq_len_k)。默认为None。

                返回值:
                输出，注意力权重
                """
            #revmask = tf.constant(0,dtype=tf.bool)
            #mask = tf.expand_dims(tf.math.equal(revmask,tfone), axis=1)

            matmul_qk = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)

            # 缩放 matmul_qk
            dk = tf.cast(tf.shape(k)[-1], tf.float32)
            scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)

            # 将 mask 加入到缩放的张量上。
            scaled_attention_logits = tf.where(mask,1e-10,scaled_attention_logits)
            scaled_attention_logits += (tf.cast(mask,dtype=tf.float32) * -1e9)

            # softmax 在最后一个轴（seq_len_k）上归一化，因此分数
            # 相加等于1。
            attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)  # (..., seq_len_q, seq_len_k)
            attention_weights = tf.where(mask,0.0,attention_weights)

            output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)

            return output

        def call(self, x,px_k,px_v,h_k,h_v,x_mask,h_mask):
            residual = x
            batch_size = tf.shape(x)[0]
            x = self.norm(x)
            x_q,x_k,x_v = self.forward_x(x,batch_size)
            x_k = tf.concat([px_k,x_k],axis=2)
            x_v = tf.concat([px_v,x_v],axis=2)
            # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
            # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
            x_scaled_attention = self.scaled_dot_product_attention(
                    x_q, x_k[:,:,1:,:], x_v[:,:,1:,:],x_mask)

            x_scaled_attention = tf.transpose(x_scaled_attention, perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)

            x_concat_attention = tf.reshape(x_scaled_attention, 
                  (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)

            output_x = self.fc_x(x_concat_attention)  # (batch_size, seq_len_q, d_model)

            h_scaled_attention = self.scaled_dot_product_attention(
                  x_q, h_k, h_v,h_mask)
            h_scaled_attention = tf.transpose(h_scaled_attention, perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)
            h_concat_attention = tf.reshape(h_scaled_attention, 
                  (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)
            output_h = self.fc_h(h_concat_attention)  # (batch_size, seq_len_q, d_model)
            output = output_x + output_h
            output += residual
            #print('self out {}'.format(output))

            return output,x_k,x_v

    class MultiHeadPNCAAttention_h(tf.keras.layers.Layer):
        """Memory-side half of PNCA attention: projects the memory into
        per-head keys and values (computed once, reused every decode step)."""

        def __init__(self, d_model, num_heads, **kwargs):
            super(MultiHeadPNCAAttention_h, self).__init__(**kwargs)
            self.num_heads = num_heads
            self.d_model = d_model
            self.depth = d_model // self.num_heads
            # Fused K/V projection.
            self.w_h_kv = tf.keras.layers.Dense(d_model * 2)

        def split_heads(self, x, batch_size):
            """Reshape (batch, seq, d_model) -> (batch, num_heads, seq, depth)."""
            per_head = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
            return tf.transpose(per_head, perm=[0, 2, 1, 3])

        def call(self, h):
            batch_size = tf.shape(h)[0]
            key, value = tf.split(self.w_h_kv(h), num_or_size_splits=2, axis=-1)
            return self.split_heads(key, batch_size), self.split_heads(value, batch_size)

    class HybridAttentionDecoder_h(tf.keras.layers.Layer):
        """Runs the memory K/V projection of every one of the 12 decoder layers
        and stacks the per-layer results along axis 0."""

        def __init__(self, d_model, num_heads, **kwargs):
            super(HybridAttentionDecoder_h, self).__init__(**kwargs)
            # One projection per decoder layer; creation order fixes weight order.
            self.att = [MultiHeadPNCAAttention_h(d_model, num_heads) for _ in range(12)]

        def call(self, h):
            pairs = [attn(h) for attn in self.att]
            k_stack = tf.concat([k for k, _ in pairs], axis=0)
            v_stack = tf.concat([v for _, v in pairs], axis=0)
            return k_stack, v_stack
    
    # ---- Precompute memory-side K/V for all 12 decoder layers, export tflite ----
    mem_in = tf.keras.Input(shape=(None,320),batch_size=1, name='mem_in')
    hk_out,hv_out = HybridAttentionDecoder_h(128,8)(mem_in)
    hdec_model = tf.keras.Model((mem_in),(hk_out,hv_out))
    hdec_model_layer = hdec_model.get_layer(name='hybrid_attention_decoder_h')
    hdec_model_para = []
    for layeri in range(12):
        # torch Linear weight (out, in) -> TF Dense kernel (in, out).
        hdec_w_h_kv_weight = np.transpose(am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.w_h_kv.weight".format(layeri)].cpu().numpy(),(1,0))
        hdec_w_h_kv_bias = am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.w_h_kv.bias".format(layeri)].cpu().numpy()
        hdec_model_para.extend([hdec_w_h_kv_weight,hdec_w_h_kv_bias])
    hdec_model_layer.set_weights(hdec_model_para)

    tflite_path =  os.path.join(dst_tflite_path,'attenH.tflite')
    save_tflite_model(hdec_model,tflite_path)


    class  PNCABlock(tf.keras.layers.Layer):
        """One mel-decoder layer: PNCA attention followed by a position-wise
        convolutional feed-forward net.

        NOTE(review): PositionwiseConvFeedForward is defined elsewhere in this
        file (outside this chunk).
        """
        def __init__(self, d_model, num_heads,d_inner, **kwargs):
            super(PNCABlock, self).__init__(**kwargs)

            self.pnca_attn = MultiHeadPNCAAttention(d_model,num_heads)
            self.pos_ffn = PositionwiseConvFeedForward(d_model,d_inner,kernel_size=(1,1))

        def call(self, x,x_k,x_v,h_k,h_v,x_mask,h_mask):
            # Returns the layer output plus the updated x K/V caches.
            output,xk,xv = self.pnca_attn(x,x_k,x_v,h_k,h_v,x_mask,h_mask)
            output = self.pos_ffn(output)
            return output,xk,xv

    class HybridAttentionDecoder_1(tf.keras.layers.Layer):
        """Decoder front end: prenet on the previous mel frame, concatenation
        with the memory frame, input projection, then sqrt(d_model) scaling."""

        def __init__(self, prenet_units, d_model, num_heads, d_inner, d_out, **kwargs):
            super(HybridAttentionDecoder_1, self).__init__(**kwargs)
            # Constant sqrt(d_model) scale applied after the projection.
            self.scale = tf.math.sqrt(tf.constant([d_model], dtype=tf.float32))
            self.prenet = Prenet3(prenet_units, d_model)
            self.dec_in_proj = tf.keras.layers.Dense(d_model)

        def call(self, inputs, memory):
            combined = tf.concat([memory, self.prenet(inputs)], axis=-1)
            return self.dec_in_proj(combined) * self.scale

    class HybridAttentionDecoder_2(tf.keras.layers.Layer):
        """Decoder back end: LayerNorm followed by the output projection."""

        def __init__(self, prenet_units, d_model, num_heads, d_inner, d_out, **kwargs):
            super(HybridAttentionDecoder_2, self).__init__(**kwargs)
            self.ln = tf.keras.layers.LayerNormalization(epsilon=1e-6)
            self.dec_out_proj = tf.keras.layers.Dense(d_out)

        def call(self, pnca_output):
            return self.dec_out_proj(self.ln(pnca_output))

    class  get_pnca_attn_mask(tf.keras.layers.Layer):
        """Builds the boolean PNCA attention masks (True = masked out).

        For each query position t, the x mask allows the backward band
        [t - x_band_width, t] and the h mask allows the forward band
        [t, t + h_band_width]. The scalar inputs arrive as (1, 1) tensors.
        """
        def __init__(self, **kwargs):
            super(get_pnca_attn_mask, self).__init__(**kwargs)

        def call(self,max_len,x_band_width,h_band_width):
            # Unwrap the (1, 1)-shaped scalar inputs.
            max_len = max_len[0,0]
            x_band_width = x_band_width[0,0]
            h_band_width = h_band_width[0,0]
            range_ = tf.range(max_len)
            # Allowed x window per position: [t - x_band_width, t] (clipped).
            x_start = tf.clip_by_value(range_ - x_band_width, clip_value_min=0,clip_value_max=max_len-1)
            x_start = tf.expand_dims(tf.expand_dims(x_start,axis=0),axis=0)
            x_end = tf.expand_dims(tf.expand_dims(range_+1,axis=0),axis=0)

            # Allowed h window per position: [t, t + h_band_width] (clipped).
            h_start = tf.expand_dims(tf.expand_dims(range_,axis=0),axis=0)
            h_end = tf.clip_by_value(range_ + h_band_width + 1, clip_value_min=0,clip_value_max=max_len + 1)
            h_end = tf.expand_dims(tf.expand_dims(h_end,axis=0),axis=0)

            # True where the key position falls OUTSIDE the allowed window.
            pnca_x_attn_mask = tf.math.logical_not(tf.math.logical_and(x_start <= tf.expand_dims(tf.expand_dims(range_,axis=-1),axis=0), x_end > tf.expand_dims(tf.expand_dims(range_,axis=-1),axis=0)))
            pnca_h_attn_mask = tf.math.logical_not(tf.math.logical_and(h_start <= tf.expand_dims(tf.expand_dims(range_,axis=-1),axis=0), h_end > tf.expand_dims(tf.expand_dims(range_,axis=-1),axis=0)))

            # Transpose to (1, query, key) layout.
            pnca_x_attn_mask = tf.transpose(pnca_x_attn_mask, perm=[0, 2, 1])
            pnca_h_attn_mask = tf.transpose(pnca_h_attn_mask, perm=[0, 2, 1])
            return pnca_x_attn_mask,pnca_h_attn_mask


    # ---- Decoder front-end model (prenet + input projection), export tflite ----
    mel_in = tf.keras.Input(shape=(1,82),batch_size=1, name='mel_in')
    mem_in = tf.keras.Input(shape=(1,320),batch_size=1, name='mem_in')
    dec1_out = HybridAttentionDecoder_1(256,128,8,1024,246)(mel_in,mem_in)
    dec1_model = tf.keras.Model((mel_in,mem_in),(dec1_out))
    dec1_model_layer = dec1_model.get_layer(name='hybrid_attention_decoder_1')
    dec1_model_para = []
    # Prenet: torch Sequential indices 0/3/6 — presumably the interleaved
    # activation/dropout modules are skipped (TODO confirm against torch model).
    # torch Linear (out, in) -> TF Dense (in, out).
    mel_dec_fcs0_w = np.transpose(am_model_para["mel_decoder.mel_dec.prenet.fcs.0.weight"].cpu().numpy(),(1,0))
    mel_dec_fcs0_b = am_model_para["mel_decoder.mel_dec.prenet.fcs.0.bias"].cpu().numpy()
    mel_dec_fcs1_w = np.transpose(am_model_para["mel_decoder.mel_dec.prenet.fcs.3.weight"].cpu().numpy(),(1,0))
    mel_dec_fcs1_b = am_model_para["mel_decoder.mel_dec.prenet.fcs.3.bias"].cpu().numpy()
    mel_dec_fcs2_w = np.transpose(am_model_para["mel_decoder.mel_dec.prenet.fcs.6.weight"].cpu().numpy(),(1,0))
    mel_dec_fcs2_b = am_model_para["mel_decoder.mel_dec.prenet.fcs.6.bias"].cpu().numpy()
    mel_dec_inproj_w = np.transpose(am_model_para["mel_decoder.mel_dec.dec_in_proj.weight"].cpu().numpy(),(1,0))
    mel_dec_inproj_b = am_model_para["mel_decoder.mel_dec.dec_in_proj.bias"].cpu().numpy()
    dec1_model_layer.set_weights([mel_dec_fcs0_w,mel_dec_fcs0_b,mel_dec_fcs1_w,mel_dec_fcs1_b,mel_dec_fcs2_w,mel_dec_fcs2_b,mel_dec_inproj_w,mel_dec_inproj_b])

    tflite_path =  os.path.join(dst_tflite_path,'meldec1.tflite')
    save_tflite_model(dec1_model,tflite_path)

    # ---- One single-step model per decoder layer (12 total) ----
    # Each model takes the current query frame plus the cached x K/V, the
    # precomputed memory K/V and both masks, and is exported as its own tflite.
    dec2_pncas = []
    for layi in range(12):
        x_in = tf.keras.Input(shape=(1,128),batch_size=1, name='x_q_in')
        x_k_in = tf.keras.Input(shape=(8,None,16),batch_size=1, name='x_k_in')
        x_v_in = tf.keras.Input(shape=(8,None,16),batch_size=1, name='x_v_in')
        h_k_in = tf.keras.Input(shape=(8,None,16),batch_size=1, name='h_k_in')
        h_v_in = tf.keras.Input(shape=(8,None,16),batch_size=1, name='h_v_in')
        x_mask_in = tf.keras.Input(shape=(1,None),batch_size=1,dtype=tf.bool, name='x_mask_in')
        h_mask_in = tf.keras.Input(shape=(1,None),batch_size=1, dtype=tf.bool,name='h_mask_in')
        dec2_out,x_k_out,x_v_out = PNCABlock(128,8,1024,name='dec2pnca_{}'.format(layi))(x_in,x_k_in,x_v_in,h_k_in,h_v_in,x_mask_in,h_mask_in)
        dec2_pnca_model = tf.keras.Model((x_in,x_k_in,x_v_in,h_k_in,h_v_in,x_mask_in,h_mask_in),(dec2_out,x_k_out,x_v_out))
        dec2_pncas.append(dec2_pnca_model)

    for layeri in range(12):
        # Weight order must match PNCABlock's layer-creation order:
        # attention (norm, w_x_qkv, fc_x, fc_h) then pos_ffn (w_1, w_2, norm).
        mel_dec_attn_norm_w = am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.layer_norm.weight".format(layeri)].cpu().numpy()
        mel_dec_attn_norm_b = am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.layer_norm.bias".format(layeri)].cpu().numpy()
        mel_dec_attn_x_qkv_w = np.transpose(am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.w_x_qkv.weight".format(layeri)].cpu().numpy(),(1,0))
        mel_dec_attn_x_qkv_b = am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.w_x_qkv.bias".format(layeri)].cpu().numpy()
        mel_dec_attn_fcx_w = np.transpose(am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.fc_x.weight".format(layeri)].cpu().numpy(),(1,0))
        mel_dec_attn_fcx_b = am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.fc_x.bias".format(layeri)].cpu().numpy()
        mel_dec_attn_fch_w = np.transpose(am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.fc_h.weight".format(layeri)].cpu().numpy(),(1,0))
        mel_dec_attn_fch_b = am_model_para["mel_decoder.mel_dec.pnca.{}.pnca_attn.fc_h.bias".format(layeri)].cpu().numpy()
        # torch Conv1d (out, in, k) -> TF Conv1D (k, in, out).
        mel_dec_pos_w1_w = np.transpose(am_model_para["mel_decoder.mel_dec.pnca.{}.pos_ffn.w_1.weight".format(layeri)].cpu().numpy(),(2,1,0))
        mel_dec_pos_w1_b = am_model_para["mel_decoder.mel_dec.pnca.{}.pos_ffn.w_1.bias".format(layeri)].cpu().numpy()
        mel_dec_pos_w2_w = np.transpose(am_model_para["mel_decoder.mel_dec.pnca.{}.pos_ffn.w_2.weight".format(layeri)].cpu().numpy(),(2,1,0))
        mel_dec_pos_w2_b = am_model_para["mel_decoder.mel_dec.pnca.{}.pos_ffn.w_2.bias".format(layeri)].cpu().numpy()
        mel_dec_pos_norm_w = am_model_para["mel_decoder.mel_dec.pnca.{}.pos_ffn.layer_norm.weight".format(layeri)].cpu().numpy()
        mel_dec_pos_norm_b = am_model_para["mel_decoder.mel_dec.pnca.{}.pos_ffn.layer_norm.bias".format(layeri)].cpu().numpy()
        dec2_pnca_layer = dec2_pncas[layeri].get_layer(name='dec2pnca_{}'.format(layeri))
        dec2_pnca_layer.set_weights([mel_dec_attn_norm_w,mel_dec_attn_norm_b,mel_dec_attn_x_qkv_w,mel_dec_attn_x_qkv_b,mel_dec_attn_fcx_w,mel_dec_attn_fcx_b,mel_dec_attn_fch_w,mel_dec_attn_fch_b,mel_dec_pos_w1_w,mel_dec_pos_w1_b,mel_dec_pos_w2_w,mel_dec_pos_w2_b,mel_dec_pos_norm_w,mel_dec_pos_norm_b])
        tflite_path =  os.path.join(dst_tflite_path,'dec2pnca_{}.tflite'.format(layeri))
        save_tflite_model(dec2_pncas[layeri],tflite_path)

    # ---- Decoder back-end model (final LayerNorm + projection), export tflite ----
    pnca_in = tf.keras.Input(shape=(1,128),batch_size=1, name='pnca_in')
    dec2_out = HybridAttentionDecoder_2(256,128,8,1024,246)(pnca_in)
    dec2_model = tf.keras.Model((pnca_in),(dec2_out))
    dec2_model.summary()
    dec2_model_layer = dec2_model.get_layer(name='hybrid_attention_decoder_2')

    dec2_model_para = []
    mel_dec_ln_w = am_model_para["mel_decoder.mel_dec.ln.weight"].cpu().numpy()
    mel_dec_ln_b = am_model_para["mel_decoder.mel_dec.ln.bias"].cpu().numpy()
    # torch Linear (out, in) -> TF Dense (in, out).
    mel_dec_out_proj_w = np.transpose(am_model_para["mel_decoder.mel_dec.dec_out_proj.weight"].cpu().numpy(),(1,0))
    mel_dec_out_proj_b = am_model_para["mel_decoder.mel_dec.dec_out_proj.bias"].cpu().numpy()
    dec2_model_para.extend([mel_dec_ln_w,mel_dec_ln_b,mel_dec_out_proj_w,mel_dec_out_proj_b])
    dec2_model_layer.set_weights(dec2_model_para)

    tflite_path =  os.path.join(dst_tflite_path,'meldec2.tflite')
    save_tflite_model(dec2_model,tflite_path)

    return


def tf_mel_post(am_model_para, dst_tflite_path):
    """Export the mel-postnet to TFLite: four FSMN stages + an LSTM/FC head.

    Writes post0.tflite .. post3.tflite (one per FSMN stage) and post.tflite
    (LSTM + projection) into ``dst_tflite_path``, with weights copied from the
    PyTorch state_dict ``am_model_para``.

    Args:
        am_model_para: state_dict of the PyTorch acoustic model; keys under
            ``mel_postnet.*`` are read (conv/linear weights are transposed to
            the Keras layout).
        dst_tflite_path: destination directory for the .tflite files.
    """
    class FeedForwardNet(tf.keras.layers.Layer):
        """relu(Conv1D) followed by a bias-free Conv1D (pointwise by default)."""
        def __init__(self, d_hid, d_out, kernel_size=(1, 1), **kwargs):
            super(FeedForwardNet, self).__init__(**kwargs)
            self.w_1 = tf.keras.layers.Conv1D(d_hid, kernel_size[0], strides=1,
                                              padding='same', activation='relu',
                                              use_bias=True)
            self.w_2 = tf.keras.layers.Conv1D(d_out, kernel_size[1], strides=1,
                                              padding='same', use_bias=False)

        def call(self, x):
            return self.w_2(self.w_1(x))

    class MemoryBlockV2(tf.keras.layers.Layer):
        """Depthwise-conv FSMN memory block.

        The caller supplies explicit left/right padding tensors (instead of
        tf.pad) so the conv uses 'valid' padding; masked positions are zeroed
        before and after the convolution, and the input is added back as a
        residual.
        """
        def __init__(self, d_model, filter_size, **kwargs):
            super(MemoryBlockV2, self).__init__(**kwargs)
            self.conv_dw = tf.keras.layers.Conv1D(d_model, filter_size, strides=1,
                                                  padding='valid', groups=d_model,
                                                  use_bias=False)

        def call(self, input, mask, lpadding, rpadding):
            x = tf.where(mask, input, tf.zeros([1]))
            x = tf.concat([lpadding, x, rpadding], axis=1)
            output = self.conv_dw(x) + input
            return tf.where(mask, output, tf.zeros([1]))

    class FsmnStage(tf.keras.layers.Layer):
        """One FSMN stage: FFN -> memory block, with an optional residual
        connection around the whole stage (stage 0 has none because its input
        width, 82 mels, differs from the 256-dim memory units)."""
        def __init__(self, filter_size, num_memory_units, ffn_inner_dim,
                     residual, **kwargs):
            super(FsmnStage, self).__init__(**kwargs)
            self.ffn = FeedForwardNet(ffn_inner_dim, num_memory_units)
            self.mem = MemoryBlockV2(num_memory_units, filter_size)
            self.residual = residual

        def call(self, input, mask, lpadding, rpadding):
            context = self.ffn(input)
            memory = self.mem(context, mask, lpadding, rpadding)
            if self.residual:
                memory = tf.math.add(memory, input)
            # Both the stage output and the FFN context are exported.
            return memory, context

    class PostNet(tf.keras.layers.Layer):
        """LSTM + Dense head; adds the mel residual and returns the LSTM state
        so the caller can stream chunk by chunk."""
        def __init__(self, lstm_unit, num_mel, **kwargs):
            super(PostNet, self).__init__(**kwargs)
            self.lstm = tf.keras.layers.LSTM(lstm_unit, return_sequences=True,
                                             return_state=True)
            self.fc = tf.keras.layers.Dense(num_mel)

        def call(self, x, p_h, p_c, res_x):
            lstm_out, h, c = self.lstm(x, initial_state=[p_h, p_c])
            return tf.math.add(self.fc(lstm_out), res_x), h, c

    # --- four FSMN stages; only the input width (stage 0) and the residual
    # connection (stages 1-3) differ between them.
    for layeri in range(4):
        in_dim = 82 if layeri == 0 else 256  # stage 0 consumes raw mels
        post_in = tf.keras.Input(shape=(None, in_dim), batch_size=1, name='post_in')
        post_mask = tf.keras.Input(shape=(None, 1), batch_size=1, dtype=tf.bool, name='mask')
        post_lpadding = tf.keras.Input(shape=(None, 256), batch_size=1, name='lpadding')
        post_rpadding = tf.keras.Input(shape=(None, 256), batch_size=1, name='rpadding')
        stage = FsmnStage(41, 256, 512, residual=(layeri > 0),
                          name='post_net{}'.format(layeri))
        post_out, memout = stage(post_in, post_mask, post_lpadding, post_rpadding)
        stage_model = tf.keras.Model((post_in, post_mask, post_lpadding, post_rpadding),
                                     (post_out, memout))
        stage_model.summary()

        # PyTorch Conv1d weights are (out, in, k); Keras wants (k, in, out).
        ffn_key = "mel_postnet.fsmn.ffn_lst.{}".format(layeri)
        mem_key = "mel_postnet.fsmn.memory_block_lst.{}".format(layeri)
        post_fsmn_w1_w = np.transpose(am_model_para[ffn_key + ".w_1.weight"].cpu().numpy(), (2, 1, 0))
        post_fsmn_w1_b = am_model_para[ffn_key + ".w_1.bias"].cpu().numpy()
        post_fsmn_w2_w = np.transpose(am_model_para[ffn_key + ".w_2.weight"].cpu().numpy(), (2, 1, 0))
        post_memory_w = np.transpose(am_model_para[mem_key + ".conv_dw.weight"].cpu().numpy(), (2, 1, 0))
        # Order follows Keras build order: ffn.w_1 (kernel, bias), ffn.w_2, conv_dw.
        stage.set_weights([post_fsmn_w1_w, post_fsmn_w1_b, post_fsmn_w2_w, post_memory_w])

        tflite_path = os.path.join(dst_tflite_path, 'post{}.tflite'.format(layeri))
        save_tflite_model(stage_model, tflite_path)

    # --- LSTM + projection head.
    post_in = tf.keras.Input(shape=(None, 256), batch_size=1, name='post_in')
    post_ph = tf.keras.Input(shape=(128,), batch_size=1, name='p_h')
    post_pc = tf.keras.Input(shape=(128,), batch_size=1, name='p_c')
    post_res = tf.keras.Input(shape=(None, 82), batch_size=1, name='post_res')
    head = PostNet(128, 82, name='post_net')
    post_out, post_h, post_c = head(post_in, post_ph, post_pc, post_res)
    post_model = tf.keras.Model((post_in, post_ph, post_pc, post_res),
                                (post_out, post_h, post_c))
    post_model.summary()

    post_lstm_ih_w = np.transpose(am_model_para["mel_postnet.lstm.weight_ih_l0"].cpu().numpy(), (1, 0))
    post_lstm_hh_w = np.transpose(am_model_para["mel_postnet.lstm.weight_hh_l0"].cpu().numpy(), (1, 0))
    # PyTorch keeps two LSTM bias vectors; Keras uses their sum.
    post_lstm_b = (am_model_para["mel_postnet.lstm.bias_ih_l0"].cpu().numpy()
                   + am_model_para["mel_postnet.lstm.bias_hh_l0"].cpu().numpy())
    post_fc_w = np.transpose(am_model_para["mel_postnet.fc.weight"].cpu().numpy(), (1, 0))
    post_fc_b = am_model_para["mel_postnet.fc.bias"].cpu().numpy()
    head.set_weights([post_lstm_ih_w, post_lstm_hh_w, post_lstm_b, post_fc_w, post_fc_b])

    tflite_path = os.path.join(dst_tflite_path, 'post.tflite')
    save_tflite_model(post_model, tflite_path)

    return


def convert_vcmodel_totf(pretrain_dir, dst_path):
    """Load the pretrained AM and export every sub-network to TFLite.

    Args:
        pretrain_dir: directory holding the pretrained Sambert model.
        dst_path: directory that receives the exported .tflite/.bin files.
    """
    am_param = load_pretrain_model(pretrain_dir, dst_path)

    # Run each component exporter in the original order.
    for exporter in (tf_tts_infer, tf_variance_adaptor, tf_mel_dec, tf_mel_post):
        exporter(am_param, dst_path)

if __name__ == '__main__':
    # Fail with a usage message instead of a bare IndexError when arguments
    # are missing.
    if len(sys.argv) != 3:
        sys.exit('usage: {} <pretrain_dir> <dst_path>'.format(sys.argv[0]))
    convert_vcmodel_totf(sys.argv[1], sys.argv[2])
