import sys
import os
import time
import tensorflow as tf
import numpy as np
from modelscope.models.audio.tts import SambertHifigan
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Previously hard-coded checkpoint directory, kept for reference:
#model_dir = os.path.abspath("./pretrain_yangmi")
# Root directory of the fine-tuned model, taken from the first CLI argument.
model_dir = os.path.abspath(sys.argv[1])

# Absolute paths to the custom checkpoints/configs that SambertHifigan needs.
# Layout assumed under model_dir: tmp_am/ (acoustic model), orig_model/
# (base 16 kHz HiFi-GAN vocoder), data/ (audio config + speaker embedding).
# NOTE(review): 'F7' looks like the built-in voice name this fine-tune
# overrides — confirm against the modelscope SambertHifigan docs.
custom_infer_abs = {
    'voice_name':
    'F7',
    'am_ckpt':
    os.path.join(model_dir, 'tmp_am', 'ckpt'),
    'am_config':
    os.path.join(model_dir, 'tmp_am', 'config.yaml'),
    'voc_ckpt':
    os.path.join(model_dir, 'orig_model', 'basemodel_16k', 'hifigan', 'ckpt'),
    'voc_config':
    os.path.join(model_dir, 'orig_model', 'basemodel_16k', 'hifigan',
             'config.yaml'),
    'audio_config':
    os.path.join(model_dir, 'data', 'audio_config.yaml'),
    'se_file':
    os.path.join(model_dir, 'data', 'se', 'se.npy')
}
# Passed as **kwargs to the SambertHifigan constructor (see commented call below).
kwargs = {'custom_ckpt': custom_infer_abs}

#model_id = SambertHifigan(os.path.join(model_dir, "orig_model"), **kwargs)
#
#inference = pipeline(task=Tasks.text_to_speech, model=model_id, device='cpu')
#srctxt = sys.argv[2]
#dstpath = sys.argv[3]
#with open(srctxt, 'r') as f:
#   alltxt = f.readlines() 
#for txtline in alltxt:
#    txtline=txtline.strip().split()
#    start = time.time()
#    output = inference(input=txtline[1])
#    print('spend time {}'.format(time.time()-start))
#    wav = output[OutputKeys.OUTPUT_WAV]
#    with open(os.path.join(dstpath,txtline[0]+".wav"), 'wb') as f:
#        f.write(wav)


#print('voc para')
#voc_model_para = model_id.voices['F7'].voc_model.state_dict()
#for k,v in voc_model_para.items():
#     print("{:20s} {}".format(k, v.shape))
def voc_infer(post_mel, model_path='tfmodel/hifigan', out_wav='tflitedebug.wav'):
    """Convert the HiFi-GAN SavedModel to float16 TFLite, run it once, dump a wav.

    Args:
        post_mel: post-net mel spectrogram from the acoustic model; assumed to
            be a torch tensor of shape (time, mel_bins) — TODO confirm (only
            `.cpu().numpy()` plus a batch `expand_dims` are visible here).
        model_path: directory of the TF SavedModel to convert
            (default: the original hard-coded 'tfmodel/hifigan').
        out_wav: path of the 16 kHz mono 16-bit PCM debug wav to write
            (default: the original hard-coded 'tflitedebug.wav').

    Side effects: writes the converted model to
    `model_path.replace('tfmodel', 'tflite') + '.tflite'` (creating the
    directory if needed), prints signature/timing info, and writes `out_wav`.
    """
    import wave

    # --- SavedModel -> float16 TFLite conversion (re-done on every call). ---
    converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_types = [tf.float16]
    # Builtin TFLite ops only; SELECT_TF_OPS was deliberately left disabled.
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS,
    ]
    tflite_model = converter.convert()

    # Print the converted model's signatures so input/output names can be checked.
    interpreter = tf.lite.Interpreter(model_content=tflite_model)
    signatures = interpreter.get_signature_list()
    print(signatures)

    # Persist the converted model under ./tflite/. Create the directory first:
    # the original open() crashed with FileNotFoundError when it was missing.
    tflite_path = model_path.replace('tfmodel', 'tflite') + '.tflite'
    tflite_dir = os.path.dirname(tflite_path)
    if tflite_dir:
        os.makedirs(tflite_dir, exist_ok=True)
    with open(tflite_path, 'wb') as f:
        f.write(tflite_model)

    # --- Run the converted model once through its default signature, timed. ---
    hifi_model = tf.lite.Interpreter(tflite_path)
    sig = hifi_model.get_signature_runner('serving_default')
    start = time.time()
    # Add a leading batch dimension before feeding the mel to the vocoder.
    hifi_in = tf.expand_dims(tf.convert_to_tensor(post_mel.cpu().numpy()), axis=0)
    sig_out = sig(mel_in=hifi_in)
    print('hifi time {}'.format(time.time() - start))
    hifi_out = sig_out['hifigan']

    # NOTE: a chunked/overlapped streaming variant (76-frame windows with
    # 8-frame left context) was prototyped here and removed as dead code.

    # Scale the float waveform (assumed in [-1, 1) — verify) to 16-bit PCM
    # and write it as a mono 16 kHz wav for listening/debugging.
    pcm = (tf.reshape(hifi_out, (-1)) * 32768.0).numpy().astype(dtype=np.int16)
    with wave.open(out_wav, 'wb') as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)  # 2 bytes per sample -> int16
        wf.setframerate(16000)
        wf.writeframes(pcm.data)


#print('tmp_vocinput {}'.format(model_id.voices['F7'].tmp_vocinput.shape))
#voc_infer(model_id.voices['F7'].tmp_vocinput)
#voc_infer()
