import os

import yaml
from ..utils import *
from torch.utils.data import DataLoader
import torch

from .dataset.dataset import Dataset


def _do_extra_tar_by_wenet_dataloader(shards_list_file, output_dir_path, batch_size=50, num_workers=10):
    """
    Extract shard tar files into individual wav files plus a jsonl manifest,
    by streaming the shards through a wenet-style DataLoader.

    Output layout:
      - wavs:     {output_dir_path}/wav/{key}.wav
      - manifest: {output_dir_path}/data.list (one json dict per line)

    Timing notes (measured): batch_size=10, num_workers=1 -> ~15.9s for 10 tar
    files (nvidia-smi shows 8 GPUs in use); ~4s in "no wav" mode (no audio
    read/write). batch_size=10, num_workers=10 -> ~16.02s.

    :param shards_list_file: path to a text file listing the shard tar files
    :param output_dir_path: destination directory (created if missing)
    :param batch_size: items per batch produced by the dataset
    :param num_workers: DataLoader worker process count
    :return: None (writes wav files and data.list as side effects)
    """
    # shuffle/sort disabled: extraction should keep the on-disk shard order.
    dataset_conf = {'batch_conf': {'batch_size': batch_size, 'batch_type': 'static', 'max_frames_in_batch': 0},
                    'fbank_conf': {'dither': 0.1, 'frame_length': 25, 'frame_shift': 10, 'num_mel_bins': 80},
                    'filter_conf': {'max_length': 1600, 'max_output_input_ratio': 0.125, 'min_length': 100,
                                    'token_max_length': 200, 'token_min_length': 1}, 'pitch_shift': False,
                    'resample_conf': {'resample_rate': 16000}, 'shuffle': False, 'shuffle_conf': {'shuffle_size': 10240},
                    'sort': False, 'sort_conf': {'sort_size': 512}, 'spec_aug': False,
                    'spec_aug_conf': {'max_f': 10, 'max_t': 50, 'num_f_mask': 2, 'num_t_mask': 2},
                    'speed_perturb': False,
                    'split_with_space': True, 'token_mask': False, 'token_mask_conf': {'p': 0.2},
                    'volume_perturb': False}

    test_dataset = Dataset("shard",
                           shards_list_file,
                           {},
                           None,
                           dataset_conf,
                           partition=False,
                           num_workers=num_workers)
    # Seeded generator so any loader-side randomness is reproducible.
    generator = torch.Generator()
    generator.manual_seed(1234)
    # batch_size=None: the Dataset already yields fully-formed batches.
    test_data_loader = DataLoader(test_dataset, batch_size=None, num_workers=num_workers, generator=generator)
    time_now = do_get_now_time()
    output_dir = output_dir_path
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(f'{output_dir}/wav', exist_ok=True)
    data_list_path = os.path.join(output_dir, "data.list")
    data_list = []
    for batch in test_data_loader:
        for key, txt, wav, extra_dict, file_path in zip(batch['keys'], batch['labels'], batch['wavs'],
                                                        batch['extra_dicts'], batch['tar_files']):
            wav_path = os.path.join(f'{output_dir}/wav', f'{key}.wav')
            file_path_key = do_get_file_pure_name_from_path(file_path)
            item_dict = {'key': key, 'txt': txt, 'wav': wav_path, 'extra_dict': extra_dict,
                         "file_path_key": file_path_key}
            data_list.append(item_dict)
            # wav is already encoded audio bytes from the tar; write it verbatim
            # (no torchaudio re-encode needed).
            with open(wav_path, "wb") as wav_file:
                wav_file.write(wav)
    time_elapsed = do_get_elapsed_time(time_now)
    print_info(f'time_elapsed: {time_elapsed}')  # ~13.5s for 10 tar files
    write_dict_list_to_jsonl(data_list, data_list_path)


def do_get_data_loader_for_read(shards_list_file, batch_size=50, num_workers=10):
    """
    Build and return a DataLoader that streams batches from shard tar files.

    Unlike the extraction helpers, this loader enables shuffle/sort with window
    sizes tied to batch_size, so consumers see locally shuffled, length-sorted
    batches for reading.

    :param shards_list_file: path to a text file listing the shard tar files
    :param batch_size: items per batch produced by the dataset
    :param num_workers: DataLoader worker process count
    :return: a torch DataLoader yielding pre-batched dicts
    """
    dataset_conf = {'batch_conf': {'batch_size': batch_size, 'batch_type': 'static', 'max_frames_in_batch': 0},
                    'fbank_conf': {'dither': 0.1, 'frame_length': 25, 'frame_shift': 10, 'num_mel_bins': 80},
                    'filter_conf': {'max_length': 1600, 'max_output_input_ratio': 0.125, 'min_length': 100,
                                    'token_max_length': 200, 'token_min_length': 1}, 'pitch_shift': False,
                    'resample_conf': {'resample_rate': 16000}, 'shuffle': True, 'shuffle_conf': {'shuffle_size': batch_size},
                    'sort': True, 'sort_conf': {'sort_size': batch_size}, 'spec_aug': False,
                    'spec_aug_conf': {'max_f': 10, 'max_t': 50, 'num_f_mask': 2, 'num_t_mask': 2},
                    'speed_perturb': False,
                    'split_with_space': True, 'token_mask': False, 'token_mask_conf': {'p': 0.2},
                    'volume_perturb': False}

    test_dataset = Dataset("shard",
                           shards_list_file,
                           {},
                           None,
                           dataset_conf,
                           partition=False,
                           num_workers=num_workers)
    # Seeded generator so shuffling is reproducible across runs.
    generator = torch.Generator()
    generator.manual_seed(1234)
    # batch_size=None: the Dataset already yields fully-formed batches.
    return DataLoader(test_dataset, batch_size=None, num_workers=num_workers, generator=generator)

def do_convert_shards_to_format_raw(shards_list_file, output_dir_path, batch_size=50, num_workers=10):
    """
    Convert shard tar files to "raw" format: one wav file per utterance plus a
    jsonl manifest.

    Output layout:
      - wavs:     {output_dir_path}/wav/{key}.wav
      - manifest: {output_dir_path}/data.list (one json dict per line, with
        keys: key, txt, wav, extra, tar_path_key)

    Timing notes (measured): batch_size=10, num_workers=1 -> ~15.9s for 10 tar
    files (nvidia-smi shows 8 GPUs in use); ~4s in "no wav" mode (no audio
    read/write). batch_size=10, num_workers=10 -> ~16.02s.

    :param shards_list_file: path to a text file listing the shard tar files
    :param output_dir_path: destination directory (created if missing)
    :param batch_size: items per batch produced by the dataset
    :param num_workers: DataLoader worker process count
    :return: None (writes wav files and data.list as side effects)
    """
    # NOTE(review): shuffle/sort are enabled here (unlike the extraction
    # helper), so output order will not match the shard order — presumably
    # intentional for this conversion path; confirm against callers.
    dataset_conf = {'batch_conf': {'batch_size': batch_size, 'batch_type': 'static', 'max_frames_in_batch': 0},
                    'fbank_conf': {'dither': 0.1, 'frame_length': 25, 'frame_shift': 10, 'num_mel_bins': 80},
                    'filter_conf': {'max_length': 1600, 'max_output_input_ratio': 0.125, 'min_length': 100,
                                    'token_max_length': 200, 'token_min_length': 1}, 'pitch_shift': False,
                    'resample_conf': {'resample_rate': 16000}, 'shuffle': True, 'shuffle_conf': {'shuffle_size': 10240},
                    'sort': True, 'sort_conf': {'sort_size': 512}, 'spec_aug': False,
                    'spec_aug_conf': {'max_f': 10, 'max_t': 50, 'num_f_mask': 2, 'num_t_mask': 2},
                    'speed_perturb': False,
                    'split_with_space': True, 'token_mask': False, 'token_mask_conf': {'p': 0.2},
                    'volume_perturb': False}

    test_dataset = Dataset("shard",
                           shards_list_file,
                           {},
                           None,
                           dataset_conf,
                           partition=False,
                           num_workers=num_workers)
    # Seeded generator so shuffling is reproducible across runs.
    generator = torch.Generator()
    generator.manual_seed(1234)
    # batch_size=None: the Dataset already yields fully-formed batches.
    test_data_loader = DataLoader(test_dataset, batch_size=None, num_workers=num_workers, generator=generator)

    time_now = do_get_now_time()
    output_dir = output_dir_path
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(f'{output_dir}/wav', exist_ok=True)
    data_list_path = os.path.join(output_dir, "data.list")
    data_list = []
    for batch in test_data_loader:
        for key, txt, wav, extra_dict, file_path in zip(batch['keys'], batch['labels'], batch['wavs'],
                                                        batch['extra_dicts'], batch['tar_files']):
            wav_path = os.path.join(f'{output_dir}/wav', f'{key}.wav')
            file_path_key = do_get_file_pure_name_from_path(file_path)
            item_dict = {'key': key, 'txt': txt, 'wav': wav_path, 'extra': extra_dict,
                         "tar_path_key": file_path_key}
            data_list.append(item_dict)
            # wav is already encoded audio bytes from the tar; write it verbatim
            # (no torchaudio re-encode needed).
            with open(wav_path, "wb") as wav_file:
                wav_file.write(wav)
    time_elapsed = do_get_elapsed_time(time_now)
    print_info(f'time_elapsed: {time_elapsed}')  # ~13.5s for 10 tar files
    write_dict_list_to_jsonl(data_list, data_list_path)