import sys
import os
import time
import tensorflow as tf
import numpy as np
from modelscope.models.audio.tts import SambertHifigan
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# The model directory is given as the first CLI argument
# (e.g. "./pretrain_yangmi"); normalize it to an absolute path.
_model_dir_arg = sys.argv[1]
model_dir = os.path.abspath(_model_dir_arg)

#custom_infer_abs = {
#    'voice_name':
#    'F7',
#    'am_ckpt':
#    os.path.join(model_dir, 'tmp_am', 'ckpt'),
#    'am_config':
#    os.path.join(model_dir, 'tmp_am', 'config.yaml'),
#    'voc_ckpt':
#    os.path.join(model_dir, 'orig_model', 'basemodel_16k', 'hifigan', 'ckpt'),
#    'voc_config':
#    os.path.join(model_dir, 'orig_model', 'basemodel_16k', 'hifigan',
#             'config.yaml'),
#    'audio_config':
#    os.path.join(model_dir, 'data', 'audio_config.yaml'),
#    'se_file':
#    os.path.join(model_dir, 'data', 'se', 'se.npy')
#}
#kwargs = {'custom_ckpt': custom_infer_abs}
#
#model_id = SambertHifigan(os.path.join(model_dir, "orig_model"), **kwargs)
#
#inference = pipeline(task=Tasks.text_to_speech, model=model_id, device='cpu')
#srctxt = sys.argv[2]
#dstpath = sys.argv[3]
#with open(srctxt, 'r') as f:
#   alltxt = f.readlines() 
#for txtline in alltxt:
#    txtline=txtline.strip().split()
#    start = time.time()
#    output = inference(input=txtline[1])
#    print('spend time {}'.format(time.time()-start))
#    wav = output[OutputKeys.OUTPUT_WAV]
#    with open(os.path.join(dstpath,txtline[0]+".wav"), 'wb') as f:
#        f.write(wav)


#print('voc para')
#voc_model_para = model_id.voices['F7'].voc_model.state_dict()
#for k,v in voc_model_para.items():
#     print("{:20s} {}".format(k, v.shape))
def voc_infer(post_mel):
    """Synthesize int16 PCM from a mel-spectrogram with a chunked TFLite vocoder.

    The HiFi-GAN vocoder has been split into six TFLite sub-models
    (``tflite/hifigan0.tflite`` .. ``tflite/hifigan5.tflite``) that are run
    in sequence.  Every stage processes its input in fixed-size sliding
    windows with a small overlap on each side; the overlapped regions are cut
    away from each window's output before the pieces are concatenated again,
    so a single interpreter call only ever sees a bounded window.

    Stage roles (as evidenced by the slicing arithmetic below):
      * hifigan0: mel -> intermediate features ('hifigan0') at the mel frame
        rate, plus an excitation signal ('hifigan0_1') at 200 samples/frame.
      * hifigan1..hifigan4: progressive upsampling, each conditioned on the
        matching slice of the stage-0 excitation signal.
      * hifigan5: final pass at sample rate, no excitation input.

    Args:
        post_mel: mel spectrogram from the acoustic model.  Assumed to be a
            2-D array of shape (frames, n_mels) convertible via
            ``tf.convert_to_tensor`` -- TODO confirm with the caller (the
            commented-out variant expected a torch CPU tensor).

    Returns:
        ``numpy.ndarray`` of dtype int16: mono PCM samples (float waveform
        scaled by 32768).

    NOTE(review): the hard-coded trim widths equal overlap * upsampling
    factor (30 == 3*10, 25 == 5*5, 40 == 20*2); the ``hifi*_up`` variables
    are defined but never referenced otherwise.  An input shorter than a
    stage's overlap would leave the per-window list empty and make
    ``tf.concat`` raise -- presumably inputs are always long enough.
    """
    # (Large blocks of commented-out experiment code previously lived here:
    # SavedModel -> TFLite conversion for hifigan0..hifigan5, and a
    # non-chunked whole-utterance inference path.)

    # ---------------- stage 0: mel -> features + excitation ----------------
    model_path = 'tfmodel/hifigan0'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    hifi0_model = tf.lite.Interpreter(tflite_path)
    sig = hifi0_model.get_signature_runner('serving_default')
    #hifi_in = tf.expand_dims(tf.convert_to_tensor(post_mel.cpu().numpy()),axis=0)
    # Add the batch dimension: (frames, n_mels) -> (1, frames, n_mels).
    hifi_in = tf.expand_dims(tf.convert_to_tensor(post_mel),axis=0)
    hifi_out_list = []
    exci_out_list = []
    # Number of mel frames in the input.
    time_len = tf.shape(hifi_in).numpy()[1]
    #hifi0_win = 48
    #hifi0_olp = 4
    hifi0_win = 21  # window length in mel frames
    hifi0_olp = 3   # overlap kept on each side of a window
    # Slide by win - 2*olp so consecutive windows overlap by olp frames on
    # each side; `step` marks the first "kept" frame of the current window.
    for step in range(hifi0_olp,time_len,hifi0_win-2*hifi0_olp):
        start = step-hifi0_olp
        end = step+hifi0_win-hifi0_olp
        melstep = hifi_in[:,start:end,:]
        # Zero-pad the last window on the right up to the fixed window size.
        rpad = hifi0_win - tf.shape(melstep).numpy()[1]
        if rpad > 0:
            melstep = tf.concat([melstep,tf.zeros([1,rpad,melstep.shape[2]])],axis=1)
        sig_out = sig(hifi0_in=melstep)
        hifi_out_step = sig_out['hifigan0']    # features at mel frame rate
        exci_out_step = sig_out['hifigan0_1']  # excitation, 200 samples/frame
        #print('hifi_out {} {}'.format(hifi_out_step.shape,exci_out_step.shape))
        # Trim the overlaps: the first window keeps its left edge, interior
        # windows drop olp on both sides, and the final window keeps its
        # right edge minus any zero padding.  The excitation is trimmed by
        # the same amounts scaled by its 200x rate.
        if step == hifi0_olp:
            hifi_out_step = hifi_out_step[:,:-hifi0_olp,:]
            exci_out_step = exci_out_step[:,:-hifi0_olp*200,:]
        else:
            if end<=time_len:
                hifi_out_step = hifi_out_step[:,hifi0_olp:-hifi0_olp,:]
                exci_out_step = exci_out_step[:,hifi0_olp*200:-hifi0_olp*200,:]
            else:
                if rpad > 0:
                    hifi_out_step = hifi_out_step[:,hifi0_olp:-(rpad),:]
                    exci_out_step = exci_out_step[:,hifi0_olp*200:-(rpad*200),:]
                else:
                    hifi_out_step = hifi_out_step[:,hifi0_olp:,:]
                    exci_out_step = exci_out_step[:,hifi0_olp*200:,:]

        hifi_out_list.append(hifi_out_step)
        exci_out_list.append(exci_out_step)
        if end > time_len:
            break

    # Re-assemble the trimmed windows along the time axis.
    hifi0_out = tf.concat(hifi_out_list,axis=1)
    exci_out = tf.concat(exci_out_list,axis=1)
    print('hifi0 {} {}'.format(hifi0_out.shape,exci_out.shape))

    # ---------------- stage 1: 10x upsampling, excitation-conditioned ------
    model_path = 'tfmodel/hifigan1'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    hifi1_model = tf.lite.Interpreter(tflite_path)
    sig = hifi1_model.get_signature_runner('serving_default')

    hifi1_win = 21  # window length, still in mel frames
    hifi1_olp = 3
    hifi1_up = 10   # upsampling factor (trims below hard-code olp*up == 30)
    hifi_out_list = []
    time_len = tf.shape(hifi0_out).numpy()[1]
    for step in range(hifi1_olp,time_len,hifi1_win-2*hifi1_olp):
        start = step-hifi1_olp
        end = step+hifi1_win-hifi1_olp
        melstep = hifi0_out[:,start:end,:]
        # Input is at mel frame rate, so 200 excitation samples per frame.
        excistep = exci_out[:,start*200:end*200]
        rpad = hifi1_win - tf.shape(melstep).numpy()[1]
        if rpad > 0:
            melstep = tf.concat([melstep,tf.zeros([1,rpad,melstep.shape[2]])],axis=1)
            excistep = tf.concat([excistep,tf.zeros([1,rpad*200,excistep.shape[2]])],axis=1)
        #hifi_out_step = hifi1_model((melstep,excistep),training=True)
        sig_out = sig(hifi1_in=melstep,hifi1_exci=excistep)
        hifi_out_step = sig_out['hifigan1']
        #print('hifi_out {} '.format(hifi_out_step.shape))
        # Output frames are 10x the input frames: trim olp*10 == 30 per side.
        if step == hifi1_olp:
            hifi_out_step = hifi_out_step[:,:-30,:]
        else:
            if end<=time_len:
                hifi_out_step = hifi_out_step[:,30:-30,:]
            else:
                if rpad > 0:
                    hifi_out_step = hifi_out_step[:,30:-(rpad*10),:]
                else:
                    hifi_out_step = hifi_out_step[:,30:,:]

        hifi_out_list.append(hifi_out_step)
        if end > time_len:
            break

    hifi1_out = tf.concat(hifi_out_list,axis=1)
    print('hifi1 {}'.format(hifi1_out.shape))

    # ---------------- stage 2: 5x upsampling -------------------------------
    model_path = 'tfmodel/hifigan2'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    hifi2_model = tf.lite.Interpreter(tflite_path)
    sig = hifi2_model.get_signature_runner('serving_default')
    hifi2_win = 150
    hifi2_olp = 5
    hifi2_up = 5
    hifi_out_list = []
    time_len = tf.shape(hifi1_out).numpy()[1]
    for step in range(hifi2_olp,time_len,hifi2_win-2*hifi2_olp):
        start = step-hifi2_olp
        end = step+hifi2_win-hifi2_olp
        melstep = hifi1_out[:,start:end,:]
        # Input is at 10x mel rate, so 200/10 == 20 excitation samples/frame.
        excistep = exci_out[:,(start)*20:(end)*20]
        rpad = hifi2_win - tf.shape(melstep).numpy()[1]
        if rpad > 0:
            melstep = tf.concat([melstep,tf.zeros([1,rpad,melstep.shape[2]])],axis=1)
            excistep = tf.concat([excistep,tf.zeros([1,rpad*20,excistep.shape[2]])],axis=1)
        #hifi_out_step = hifi2_model((melstep,excistep),training=True)
        sig_out = sig(hifi2_in=melstep,hifi2_exci=excistep)
        hifi_out_step = sig_out['hifigan2']
        #print('hifi_out {} '.format(hifi_out_step.shape))
        # Trim olp*5 == 25 output frames per overlapped side.
        if step == hifi2_olp:
            hifi_out_step = hifi_out_step[:,:-25,:]
        else:
            if end<=time_len:
                hifi_out_step = hifi_out_step[:,25:-25,:]
            else:
                if rpad > 0:
                    hifi_out_step = hifi_out_step[:,25:-(rpad*5),:]
                else:
                    hifi_out_step = hifi_out_step[:,25:,:]
        hifi_out_list.append(hifi_out_step)
        if end > time_len:
            break

    hifi2_out = tf.concat(hifi_out_list,axis=1)
    print('hifi2 {}'.format(hifi2_out.shape))

    # ---------------- stage 3: 2x upsampling -------------------------------
    model_path = 'tfmodel/hifigan3'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    hifi3_model = tf.lite.Interpreter(tflite_path)
    sig = hifi3_model.get_signature_runner('serving_default')
    hifi3_win = 700
    hifi3_olp = 20
    hifi3_up = 2
    hifi_out_list = []
    time_len = tf.shape(hifi2_out).numpy()[1]
    for step in range(hifi3_olp,time_len,hifi3_win-2*hifi3_olp):
        start = step-hifi3_olp
        end = step+hifi3_win-hifi3_olp
        melstep = hifi2_out[:,start:end,:]
        # Input is at 50x mel rate, so 200/50 == 4 excitation samples/frame.
        excistep = exci_out[:,start*4:end*4]
        rpad = hifi3_win - tf.shape(melstep).numpy()[1]
        if rpad > 0:
            melstep = tf.concat([melstep,tf.zeros([1,rpad,melstep.shape[2]])],axis=1)
            excistep = tf.concat([excistep,tf.zeros([1,rpad*4,excistep.shape[2]])],axis=1)
        #hifi_out_step = hifi3_model((melstep,excistep),training=True)
        sig_out = sig(hifi3_in=melstep,hifi3_exci=excistep)
        hifi_out_step = sig_out['hifigan3']
        #print('hifi_out {} '.format(hifi_out_step.shape))
        # Trim olp*2 == 40 output frames per overlapped side.
        if step == hifi3_olp:
            hifi_out_step = hifi_out_step[:,:-40,:]
        else:
            if end<=time_len:
                hifi_out_step = hifi_out_step[:,40:-40,:]
            else:
                if rpad > 0:
                    hifi_out_step = hifi_out_step[:,40:-(rpad*2),:]
                else:
                    hifi_out_step = hifi_out_step[:,40:,:]
        hifi_out_list.append(hifi_out_step)
        #print('hifi_out {} {} {}'.format(step,hifi_out_step.shape,rpad))
        if end > time_len:
            break

    hifi3_out = tf.concat(hifi_out_list,axis=1)
    print('hifi3 {}'.format(hifi3_out.shape))

    # ---------------- stage 4: 2x upsampling -------------------------------
    model_path = 'tfmodel/hifigan4'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    hifi4_model = tf.lite.Interpreter(tflite_path)
    sig = hifi4_model.get_signature_runner('serving_default')
    hifi4_win = 1320 
    hifi4_olp = 20
    hifi4_up = 2
    hifi_out_list = []
    time_len = tf.shape(hifi3_out).numpy()[1]
    print('hifi4 time {} {}'.format(time_len,exci_out.shape))
    for step in range(hifi4_olp,time_len,hifi4_win-2*hifi4_olp):
        start = step-hifi4_olp
        end = step+hifi4_win-hifi4_olp
        melstep = hifi3_out[:,start:end,:]
        # Input is at 100x mel rate, so 200/100 == 2 excitation samples/frame.
        excistep = exci_out[:,(start)*2:(end)*2]
        rpad = hifi4_win - tf.shape(melstep).numpy()[1]
        if rpad > 0:
            melstep = tf.concat([melstep,tf.zeros([1,rpad,melstep.shape[2]])],axis=1)
            excistep = tf.concat([excistep,tf.zeros([1,rpad*2,excistep.shape[2]])],axis=1)
        #hifi_out_step = hifi4_model((melstep,excistep),training=True)
        sig_out = sig(hifi4_in=melstep,hifi4_exci=excistep)
        hifi_out_step = sig_out['hifigan4']
        #print('hifi_out {} '.format(hifi_out_step.shape))
        # Trim olp*2 == 40 output frames per overlapped side.
        if step == hifi4_olp:
            hifi_out_step = hifi_out_step[:,:-40,:]
        else:
            if end<=time_len:
                hifi_out_step = hifi_out_step[:,40:-40,:]
            else:
                if rpad > 0:
                    hifi_out_step = hifi_out_step[:,40:-(rpad*2),:]
                else:
                    hifi_out_step = hifi_out_step[:,40:,:]
        hifi_out_list.append(hifi_out_step)
        if end > time_len:
            break

    hifi4_out = tf.concat(hifi_out_list,axis=1)
    print('hifi4 {}'.format(hifi4_out.shape))

    # ---------------- stage 5: final pass, no excitation, no upsampling ----
    model_path = 'tfmodel/hifigan5'
    tflite_path = model_path.replace('tfmodel','tflite') + '.tflite'
    hifi5_model = tf.lite.Interpreter(tflite_path)
    sig = hifi5_model.get_signature_runner('serving_default')
    hifi5_win = 8008 
    hifi5_olp = 4
    hifi_out_list = []
    time_len = tf.shape(hifi4_out).numpy()[1]
    # Stride 8000 == hifi5_win - 2*hifi5_olp; literals 4/8000/8008 below are
    # the same olp/stride/win constants written out.
    for step in range(4,time_len,8000):
        start = step-4
        end = step+hifi5_win-hifi5_olp
        melstep = hifi4_out[:,start:end,:]
        rpad = 8008 - tf.shape(melstep).numpy()[1]
        if rpad > 0:
            melstep = tf.concat([melstep,tf.zeros([1,rpad,melstep.shape[2]])],axis=1)
        #hifi_out_step = hifi5_model((melstep),training=True)
        sig_out = sig(hifi5_in=melstep)
        hifi_out_step = sig_out['hifigan5']
        #print('hifi_out {} '.format(hifi_out_step.shape))
        # 1:1 rate here, so the trim equals the raw overlap (4 samples).
        if step == 4:
            hifi_out_step = hifi_out_step[:,:-4,:]
        else:
            if end<=time_len:
                hifi_out_step = hifi_out_step[:,4:-4,:]
            else:
                if rpad > 0:
                    hifi_out_step = hifi_out_step[:,4:-(rpad),:]
                else:
                    hifi_out_step = hifi_out_step[:,4:,:]
        hifi_out_list.append(hifi_out_step)
        if end > time_len:
            break

    hifi5_out = tf.concat(hifi_out_list,axis=1)
    print('hifi5 {}'.format(hifi5_out.shape))

    # Flatten to 1-D and scale the float waveform to int16 PCM.
    pcm = (tf.reshape(hifi5_out,(-1))*32768.0).numpy().astype(dtype=np.int16)
    return pcm
   # Example for dumping the result to a WAV file (16 kHz mono, 16-bit):
   #import wave
   #with wave.open("tflitedebug_{}.wav".format(fileid), 'wb') as wf:
   #    wf.setnchannels(1)
   #    wf.setsampwidth(2)
   #    wf.setframerate(16000)
   #    wf.writeframes(pcm.data)


#print('tmp_vocinput {}'.format(model_id.voices['F7'].tmp_vocinput.shape))
#voc_infer(model_id.voices['F7'].tmp_vocinput)
#voc_infer()
