import os
import sys

CURR_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, CURR_BASE_DIR)

def proc_autolabel(ori_audio, out_audio):
    """Run ModelScope auto-labeling on the input audio.

    Args:
        ori_audio: Path to the original wav audio to label.
        out_audio: Working/output directory for the auto-label results.

    Returns:
        The return value of ``run_auto_label``; the textual report is printed.
    """
    from modelscope.tools import run_auto_label
    # BUG FIX: the original passed the module-global ``input_wav`` (defined
    # only when run as a script) instead of the ``ori_audio`` parameter,
    # which raised NameError for any other caller.
    ret, report = run_auto_label(
        input_wav=ori_audio,
        work_dir=out_audio,
        resource_revision="v1.0.7",
    )
    print(report)
    return ret

def proc_training(dataset_id, pretrain_work_dir):
    """Finetune the pretrained SAMBERT TTS model on the given dataset.

    Args:
        dataset_id: Dataset id (or auto-labeled data directory) to train on.
        pretrain_work_dir: Temporary working directory for training output.
    """
    from modelscope.metainfo import Trainers
    from modelscope.trainers import build_trainer
    from modelscope.utils.audio.audio_utils import TtsTrainType

    pretrained_model_id = os.path.join(
        CURR_BASE_DIR,
        'speech_personal_sambert-hifigan_nsf_tts_zh-cn_pretrain_16k',
    )

    # Training info: selects which model(s) to train. Both the AM
    # (TtsTrainType.TRAIN_TYPE_SAMBERT) and the vocoder
    # (TtsTrainType.TRAIN_TYPE_VOC) are supported; here only the AM is
    # finetuned, resuming from the model's latest checkpoint step.
    sambert_cfg = {
        'train_steps': 202,          # total number of training steps
        'save_interval_steps': 200,  # save a checkpoint every N steps
        'log_interval': 10,          # emit a training log line every N steps
    }
    train_info = {TtsTrainType.TRAIN_TYPE_SAMBERT: sambert_cfg}

    # Trainer configuration: model to finetune, dataset, temporary work dir,
    # and the per-model training parameters above.
    trainer_args = dict(
        model=pretrained_model_id,
        model_revision="v1.0.6",
        work_dir=pretrain_work_dir,
        train_dataset=dataset_id,
        train_type=train_info,
    )

    trainer = build_trainer(
        Trainers.speech_kantts_trainer, default_args=trainer_args)
    trainer.train()

def proc_vc(ori_path, work_path):
    """Run the voice-clone pipeline: auto-label -> finetune -> export model.

    Args:
        ori_path: Path to the original wav audio.
        work_path: Working directory that receives all intermediate output.
    """
    if not os.path.exists(ori_path):
        print('usage: ttsvc_training origin_wav_path working_path')
        return

    ######### auto label ###########
    autolab_audio = os.path.join(work_path, 'autolab_audio')
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(autolab_audio, exist_ok=True)
    # NOTE(review): step currently disabled — re-enable to run auto-labeling.
    #proc_autolabel(ori_path, autolab_audio)

    ######### training ###########
    training_out = os.path.join(work_path, 'pretrain_out')
    os.makedirs(training_out, exist_ok=True)
    # NOTE(review): step currently disabled — re-enable to run finetuning.
    #proc_training(autolab_audio, training_out)

    ######### export model ###########
    from ttsvc_convert_to_tf import convert_vcmodel_totf
    tflite_out = os.path.join(work_path, 'tflite_model/am')
    # BUG FIX: the original wrapped the path in a redundant
    # os.path.join(tflite_out) before the existence check.
    os.makedirs(tflite_out, exist_ok=True)

    convert_vcmodel_totf(training_out, tflite_out)
    
if __name__ == '__main__':
    # BUG FIX: the original indexed sys.argv unconditionally and crashed
    # with IndexError when run without both arguments.
    if len(sys.argv) < 3:
        print('usage: ttsvc_training origin_wav_path working_path')
        sys.exit(1)
    input_wav = sys.argv[1]  # wav audio path
    work_dir = sys.argv[2]   # output path
    proc_vc(input_wav, work_dir)
