import os
import argparse
import json
import text
import numpy as np

# Directory where cleaned train/test filelists are written.
save_dir = './filelists/'  # cleaned text save dir
# Load the base fine-tuning hyperparameters; `hps` is mutated in process()
# and re-saved as ./configs/modified_finetune_speaker.json.
with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f:
    hps = json.load(f)


def devide_train_test(cleaned_pair, text_path, test_ratio=0.1, seed=42):
    """Shuffle cleaned wav/text pairs and split them into train/test filelists.

    Args:
        cleaned_pair: list of ``[wav_path, speaker_id, cleaned_text]`` entries.
        text_path: original annotation file path; its basename is reused to
            name the generated ``*_train.txt.cleaned`` / ``*_test.txt.cleaned``
            files under ``save_dir``.
        test_ratio: fraction of pairs assigned to the test split.
        seed: RNG seed; fixed default keeps the split reproducible across runs.

    Returns:
        Tuple ``(train_file_path, test_file_path)`` of the written filelists.
    """
    def save_text_file(file_name, wav_text_pairs):
        # One "|"-separated record per line (standard VITS filelist format).
        with open(file_name, 'w', encoding='utf-8') as f:
            for pair in wav_text_pairs:
                f.write('|'.join(pair) + "\n")

    cleaned_pair = np.array(cleaned_pair)
    # Use a local RandomState instead of seeding the global NumPy RNG, so the
    # split stays reproducible without clobbering other code's random state.
    rng = np.random.RandomState(seed)
    shuffled_indices = rng.permutation(len(cleaned_pair))
    test_set_size = int(len(cleaned_pair) * test_ratio)
    test_indices = shuffled_indices[:test_set_size]
    train_indices = shuffled_indices[test_set_size:]
    wav_text_pairs_train = cleaned_pair[train_indices]
    wav_text_pairs_test = cleaned_pair[test_indices]
    base = os.path.basename(text_path)
    train_file_path = os.path.join(save_dir, base.replace('.txt', '_train.txt') + ".cleaned")
    test_file_path = os.path.join(save_dir, base.replace('.txt', '_test.txt') + ".cleaned")
    save_text_file(train_file_path, wav_text_pairs_train.tolist())
    save_text_file(test_file_path, wav_text_pairs_test.tolist())
    return train_file_path, test_file_path


def clean_text(text_path, speaker_id, lang):
    """Read a ``wav_path|text`` annotation file and produce cleaned triples.

    Args:
        text_path: path to a "|"-separated annotation file whose last field
            is the transcript.
        speaker_id: integer id stored (as a string) in each output entry.
        lang: language tag (e.g. ``'ZH'``) wrapped around the transcript
            before cleaning — presumably so the cleaner picks the right
            phonemizer; the tag is stripped again afterwards. TODO confirm.

    Returns:
        List of ``[wav_path, speaker_id, cleaned_text]`` entries.

    Raises:
        FileNotFoundError: if ``text_path`` does not exist.
    """
    # The original line was a bare tuple expression — the `assert` keyword
    # was missing, so a bad path silently slipped through. Fail fast instead
    # (an explicit raise also survives `python -O`, unlike assert).
    if not os.path.exists(text_path):
        raise FileNotFoundError("{} not exists!".format(text_path))
    cleaned_pair = []
    with open(text_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    for li in lines:
        pair = li.split('|')
        # Wrap the transcript in language tags, e.g. "[ZH]你好[ZH]".
        pair[-1] = '[{}]{}[{}]'.format(lang, pair[-1].strip(), lang)
        cleaned = text._clean_text(pair[-1], hps['data']['text_cleaners'])
        new_pair = [pair[0], str(speaker_id),
                    cleaned.replace("[{}]".format(lang), "")]
        cleaned_pair.append(new_pair)
    return cleaned_pair


def process(txt_info_list):
    """Clean each speaker's annotations, split train/test, and merge filelists.

    Each entry of ``txt_info_list`` is a dict with keys ``text_path``,
    ``lang``, ``speaker_name`` and ``speaker_id``. The function writes merged
    ``training.txt.cleaned`` / ``testing.txt.cleaned`` filelists under
    ``save_dir`` and saves the updated hyperparameters to
    ``./configs/modified_finetune_speaker.json``.
    """
    def merge_file(file_path_list, merged_file_path):
        # Concatenate several per-speaker filelists into a single filelist.
        lines = []
        for file_path in file_path_list:
            with open(file_path, 'r', encoding='utf-8') as f:
                lines.extend(f.readlines())
        with open(merged_file_path, 'w', encoding='utf-8') as f:
            f.writelines(lines)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)  # creates intermediate directories as needed
    # Overwrite training hyperparameters for fine-tuning.
    hps['train']['log_interval'] = 10
    hps['train']['eval_interval'] = 1000
    hps['train']['batch_size'] = 8
    hps['data']['n_speakers'] = len(txt_info_list)
    # os.path.basename() on a bare literal was a no-op; join paths directly.
    hps['data']['training_files'] = os.path.join(save_dir, 'training.txt.cleaned')
    hps['data']['validation_files'] = os.path.join(save_dir, 'testing.txt.cleaned')
    hps['speakers'] = {}
    train_file_list = []
    test_file_list = []
    for txt_info in txt_info_list:
        hps['speakers'][txt_info['speaker_name']] = txt_info['speaker_id']
        cleaned_pair = clean_text(txt_info['text_path'], txt_info['speaker_id'],
                                  txt_info['lang'])
        train_file, test_file = devide_train_test(cleaned_pair, txt_info['text_path'])
        train_file_list.append(train_file)
        test_file_list.append(test_file)
    # Persist the modified config; ensure_ascii=False keeps CJK speaker names
    # human-readable instead of \uXXXX escapes (loads back identically).
    with open("./configs/modified_finetune_speaker.json", 'w', encoding='utf-8') as f:
        json.dump(hps, f, indent=2, ensure_ascii=False)
    merge_file(train_file_list, hps['data']['training_files'])
    merge_file(test_file_list, hps['data']['validation_files'])
    print("task done!")


def check_audio_exist(file_list):
    """Verify every audio path referenced by the given filelists exists.

    Args:
        file_list: paths to "|"-separated filelist files whose first field
            on each line is an audio file path.

    Raises:
        FileNotFoundError: if any referenced audio file is missing.
    """
    for file in file_list:
        with open(file, 'r', encoding='utf-8') as f:
            for li in f:
                path = li.split('|')[0]
                # `assert` is stripped under `python -O`; raise explicitly so
                # the check always runs and reports the offending path.
                if not os.path.isfile(path):
                    raise FileNotFoundError("audio file not found: {}".format(path))
    print('check passed!')


if __name__ == "__main__":

    # Intentionally a no-op: this module appears to be imported and driven by
    # another script that calls process() with the speaker annotation list —
    # TODO confirm with the caller before adding CLI handling here.
    pass
