import os
from .util import get_ngpu
from .util import MAIN_ROOT
from .util import run_cmd
from paddlespeech.t2s.exps.voice_cloning import parse_args
from tts3.local.check_oov import get_check_result
from tts3.local.get_mfa_result import get_mfa_result
from tts3.local.generate_duration import gen_duration_from_textgrid_do
from tts3.local.extract_feature import extract_feature
from tts3.local.prepare_env import generate_finetune_env
from tts3.local.finetune import finetune_new
from tts3.local.finetune import parse_args as finetune_parse_args

from paddlespeech.t2s.exps.synthesize_e2e import parse_args as synthesize_e2e_parse_args
from paddlespeech.t2s.exps.synthesize_e2e import evaluate as synthesize_evaluate

from pathlib import Path
import yaml
from yacs.config import CfgNode
import shutil  

def find_max_ckpt(model_path):
    """Return the largest iteration number among ``*.pdz`` checkpoints in *model_path*.

    Checkpoint files are expected to be named like ``snapshot_iter_<N>.pdz``.
    Files whose trailing ``_<suffix>`` is not a plain integer are ignored
    (the original code raised ValueError on such names).

    Args:
        model_path: directory to scan (not recursed into).

    Returns:
        The maximum iteration number found, or 0 if there is none.
    """
    max_ckpt = 0
    for filename in os.listdir(model_path):
        if not filename.endswith('.pdz'):
            continue
        # Take only the component after the last underscore, e.g.
        # "snapshot_iter_99200" -> "99200"; robust to extra underscores.
        it = filename[:-4].rsplit("_", 1)[-1]
        if it.isdigit():
            max_ckpt = max(max_ckpt, int(it))
    return max_ckpt


class FineTune:
    """Fine-tune a pretrained FastSpeech2 acoustic model on a small speaker
    dataset, then synthesize speech with the fine-tuned checkpoint.

    The pipeline mirrors examples/other/tts_finetune/tts3/run.sh, but each
    stage (OOV check, MFA alignment, duration generation, feature extraction,
    environment preparation, fine-tuning, synthesis) is invoked as a Python
    function instead of shelling out to a command.
    """

    def __init__(self):
        # Directory containing this source file.
        self.now_file_path = os.path.dirname(__file__)

        # Paths used by the original shell-script pipeline; kept for
        # reference only — the commands that used them are not executed.
        self.PYTHONPATH = os.path.join(MAIN_ROOT,
                                       "examples/other/tts_finetune/tts3")
        self.BIN_DIR = os.path.join(MAIN_ROOT,
                                    "paddlespeech/t2s/exps/fastspeech2")

        # Default pretrained acoustic-model / vocoder locations.  Callers
        # normally override these via finetune()/synthesize() arguments.
        self.pretrained_model_dir = os.path.realpath(
            "F:/vs_code/file/test-finetune/finetune-01/pretrained_models/fastspeech2_aishell3_ckpt_1.1.0")
        self.voc_model_dir = os.path.realpath(
            "F:/vs_code/file/test-finetune/finetune-01/pretrained_models/hifigan_aishell3_ckpt_0.2.0")
        # Fine-tuning hyper-parameter config (yaml).
        self.finetune_config = os.path.join("./study/finetune/conf/tts3_finetune.yaml")

    def finetune(self, pretrained_model_dir, voc_model_dir, input_dir,
                 exp_dir='temp', epoch=100):
        """Run the full fine-tuning pipeline.

        Follows examples/other/tts_finetune/tts3/run.sh step by step.

        Args:
            pretrained_model_dir: directory of the pretrained FastSpeech2
                model (must contain phone_id_map.txt and default.yaml).
            voc_model_dir: directory of the pretrained vocoder model.
            input_dir: directory with the speaker's wav files and labels.txt.
            exp_dir: working directory; mfa_result/, dump/ and exp/ are
                created inside it.
            epoch: number of fine-tuning epochs.

        Returns:
            exp_dir (checkpoints end up in <exp_dir>/exp/checkpoints).
        """
        self.pretrained_model_dir = pretrained_model_dir
        self.voc_model_dir = voc_model_dir

        newdir_name = "newdir"
        new_dir = os.path.join(input_dir, newdir_name)
        mfa_dir = os.path.join(exp_dir, 'mfa_result')
        dump_dir = os.path.join(exp_dir, 'dump')
        output_dir = os.path.join(exp_dir, 'exp')
        lang = "zh"
        ngpu = get_ngpu()

        # Step 1: check for out-of-vocabulary phones in the labels; the
        # cleaned data is written to <input_dir>/<newdir_name>.
        check_oov_args = parse_args()
        check_oov_args.input_dir = input_dir
        check_oov_args.pretrained_model_dir = self.pretrained_model_dir
        check_oov_args.newdir_name = newdir_name
        check_oov_args.lang = lang

        input_dir = Path(check_oov_args.input_dir).expanduser()
        pretrained_model_dir = Path(check_oov_args.pretrained_model_dir).expanduser()
        am_phone_file = pretrained_model_dir / "phone_id_map.txt"
        label_file = input_dir / "labels.txt"
        get_check_result(
            label_file=label_file,
            am_phone_file=am_phone_file,
            input_dir=input_dir,
            newdir_name=check_oov_args.newdir_name,
            lang=check_oov_args.lang)
        print("==========check_oov.get_check_result()=======")

        # Step 2: run Montreal Forced Aligner to produce TextGrids.
        get_mfa_result(new_dir, mfa_dir, lang)
        print("==========get_mfa_result.get_mfa_result()=======")
        # The MFA input copy is no longer needed once alignment is done.
        shutil.rmtree(new_dir)

        # Step 3: convert the TextGrids into durations.txt (written to the
        # current working directory by gen_duration_from_textgrid_do).
        gen_duration_from_textgrid_do(mfa_dir)
        print("==========gen_duration_from_textgrid.gen_duration_from_textgrid_do()=======")

        # Step 4: extract acoustic features into dump_dir using the
        # pretrained model's config.
        dump_dir = Path(dump_dir).expanduser()
        dump_dir.mkdir(parents=True, exist_ok=True)
        duration_file = "./durations.txt"
        config_file = pretrained_model_dir / "default.yaml"
        default_replace_spkid = 0
        with open(config_file) as f:
            config = CfgNode(yaml.safe_load(f))

        extract_feature(
            duration_file=duration_file,
            config=config,
            input_dir=input_dir,
            dump_dir=dump_dir,
            pretrained_model_dir=pretrained_model_dir,
            replace_spkid=default_replace_spkid)
        print("==========extract_feature.extract_feature()=======")

        # Step 5: prepare the training output dir from the pretrained model.
        generate_finetune_env(
            pretrained_model_dir=Path(pretrained_model_dir),
            output_dir=Path(output_dir))
        print("==========prepare_env.generate_finetune_env()=======")

        # Step 6: fine-tune.  NOTE: the epoch count was previously
        # hard-coded to 500, silently ignoring the `epoch` parameter;
        # it now honors the caller's value.
        args = finetune_parse_args()
        args.pretrained_model_dir = str(pretrained_model_dir)
        args.dump_dir = str(dump_dir)
        args.output_dir = str(Path(output_dir))
        args.epoch = epoch
        args.ngpu = ngpu
        args.finetune_config = self.finetune_config
        finetune_new(args)
        print("==========finetune.finetune()=======")

        return exp_dir

    def synthesize(self, pretrained_model_dir, voc_model_dir, text, wav_name,
                   out_wav_dir, exp_dir='temp'):
        """Synthesize `text` using the newest fine-tuned checkpoint.

        Args:
            pretrained_model_dir: directory of the pretrained FastSpeech2
                model (provides default.yaml and speech_stats.npy).
            voc_model_dir: directory of the pretrained hifigan vocoder.
            text: sentence to synthesize.
            wav_name: utterance id; also the output wav's base name.
            out_wav_dir: directory where the wav is written.
            exp_dir: working directory used by a prior finetune() call.

        Returns:
            Path of the expected output wav: <out_wav_dir>/<wav_name>.wav.
        """
        self.pretrained_model_dir = pretrained_model_dir
        self.voc_model_dir = voc_model_dir

        voc = "hifigan_aishell3"
        dump_dir = os.path.join(exp_dir, 'dump')
        output_dir = os.path.join(exp_dir, 'exp')
        text_path = os.path.join(exp_dir, 'sentences.txt')
        lang = "zh"
        ngpu = get_ngpu()

        # Use the checkpoint with the highest iteration number.
        model_path = f"{output_dir}/checkpoints"
        ckpt = find_max_ckpt(model_path)
        print(ckpt)

        # synthesize_e2e reads "<utt_id> <text>" lines from a sentence file.
        with open(text_path, "w", encoding='utf8') as f:
            f.write(wav_name + " " + text)

        args = synthesize_e2e_parse_args()
        args.am = "fastspeech2_aishell3"
        args.am_config = str(self.pretrained_model_dir + "/default.yaml")
        args.am_ckpt = output_dir + str("/checkpoints/snapshot_iter_" + str(ckpt) + ".pdz")
        args.am_stat = self.pretrained_model_dir + "/speech_stats.npy"
        args.voc = voc
        args.voc_config = self.voc_model_dir + "/default.yaml"
        # NOTE(review): vocoder checkpoint iteration is hard-coded; assumes
        # the aishell3 hifigan release ships snapshot_iter_2500000.pdz.
        args.voc_ckpt = self.voc_model_dir + "/snapshot_iter_2500000.pdz"
        args.voc_stat = self.voc_model_dir + "/feats_stats.npy"
        args.lang = lang
        args.text = text_path
        args.output_dir = out_wav_dir
        args.phones_dict = dump_dir + "/phone_id_map.txt"
        args.speaker_dict = dump_dir + "/speaker_id_map.txt"
        args.spk_id = 0
        args.ngpu = ngpu
        synthesize_evaluate(args)
        print("==========synthesize.synthesize_evaluate()=======")
        out_path = os.path.join(out_wav_dir, f"{wav_name}.wav")
        return out_path
