import json
import os
import jsonlines
import tqdm

from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from torch import nn
import torch
import joblib
import torchaudio
from config import PARAMETER
import config
import pickle

# Module-level logger provided by the project-wide config helper.
logger = config.get_logger()


def load_wav(wav_file_path: str, normalize: bool = True):
    """
    Load a wav file.

    Parameters:
    wav_file_path: path to the wav file
    normalize: whether to apply mean/std normalization
    Returns:
    wav : torch.Tensor, (dot_seq_size, )
    """
    waveform, _sample_rate = torchaudio.load(wav_file_path)
    if not normalize:
        return waveform
    return (waveform - waveform.mean()) / waveform.std()


def spectrogram(wav, normalize=True):
    """
    Compute a Kaldi-style log-mel filterbank spectrogram.

    Parameters:
    wav: waveform tensor (as returned by ``load_wav``)
    normalize: whether to apply mean/std normalization
    Returns:
    spec : torch.Tensor, (seq_size, fbank)
    """
    features = torchaudio.compliance.kaldi.fbank(
        waveform=wav, sample_frequency=16000, num_mel_bins=80)
    if not normalize:
        return features
    return (features - features.mean()) / features.std()


def prehand_data(data_dir: str = config.DATA_DIR,
                 prehand_save_dir: str = config.PREHAND_SAVE_DIR):
    """
    Preprocess the raw dataset into index and token files.

    Writes, under ``prehand_save_dir``:
      * prehand_data_{train,dev,test}.idx.jsonl — one JSON object per line:
        {'wav_path': '/d/s/s.wav', 'transcript': 'ni3 shi4 ...'}
      * token_list.pkl — a list[str] of all transcript tokens, with '_'
        inserted at index 0 as the CTC blank.

    NOTE(review): each ``.trn`` label file is assumed to carry the
    space-separated pinyin transcript on its second line (THCHS-30
    layout) — confirm against the dataset on disk.
    """

    def get_wav_label_filename_list(datadir: str):
        # Walk the data directory and collect every wav file together with
        # its paired transcript file. The .trn path is derived directly
        # from the wav path, so the two lists are aligned by construction.
        wav_list = []
        label_list = []
        for root, _dirs, files in os.walk(datadir):
            for file in files:
                if file.endswith(('.wav', '.WAV')):
                    wav_path = os.path.join(root, file)
                    wav_list.append(wav_path)
                    label_list.append(wav_path + '.trn')
        # Sanity check on the pairing (cannot fail given the construction
        # above; guards against future refactors breaking the alignment).
        for wav_path, label_path in zip(wav_list, label_list):
            assert wav_path.split('.')[0] == label_path.split('.')[0]
        return wav_list, label_list

    def read_pinyin(label_file: str) -> str:
        # The second line of a .trn file holds the pinyin tokens.
        # (The original code used rstrip() here and strip() in the token
        # collection pass; unified to strip() for consistency.)
        with open(label_file, 'r', encoding='utf-8') as f:
            return f.readlines()[1].strip()

    def do_prehand(wav_list, label_list, name='train'):
        # Build one {'wav_path', 'transcript'} record per sample and dump
        # them all to a .jsonl index file.
        idx_save_path = prehand_save_dir + f'prehand_data_{name}.idx.jsonl'
        logger.info(f'start prehand {name} data...')
        data_dict_list = []
        for wav_file, label_file in tqdm.tqdm(zip(wav_list, label_list),
                                              total=len(wav_list)):
            data_dict_list.append({'wav_path': wav_file,
                                   'transcript': read_pinyin(label_file)})
        with jsonlines.open(file=idx_save_path, mode='w') as writer:
            writer.write_all(data_dict_list)

    wav_file_list, label_file_list = get_wav_label_filename_list(data_dir)
    total = len(wav_file_list)
    # Fixed split ratios: ~76.5% train / ~16.3% dev / remainder test.
    train_num = int(total * 0.7654)
    dev_num = int(total * 0.1632)
    do_prehand(wav_file_list[:train_num], label_file_list[:train_num], 'train')
    do_prehand(wav_file_list[train_num:train_num + dev_num],
               label_file_list[train_num:train_num + dev_num], 'dev')
    do_prehand(wav_file_list[train_num + dev_num:],
               label_file_list[train_num + dev_num:], 'test')
    logger.info('start collection token list...')
    # Collect the vocabulary over ALL splits (train + dev + test).
    token_set = set()
    for label_file in tqdm.tqdm(label_file_list):
        token_set.update(read_pinyin(label_file).split(' '))
    token_list = list(token_set)
    token_list.insert(0, '_')  # index 0 is reserved for the CTC blank
    with open(prehand_save_dir + 'token_list.pkl', 'wb') as f:
        pickle.dump(token_list, f)


def read_jsonl(file_path):
    """Read a .jsonl file and return its rows as a list of dicts."""
    with jsonlines.open(file=file_path, mode='r') as reader:
        return [dict(record) for record in reader]


class MASRDataset(Dataset):
    """
    Speech dataset backed by a jsonl index file.

    Parameters:
    idx_file_path:
        Path to the index file; each line is a JSON object such as
        {'wav_path': '/wav/0.wav', 'transcript': 'ni3 shi4 ...'}.
    label_file_path:
        Path to a pickled list of all tokens; element 0 is '_',
        the CTC blank.

    __getitem__ returns:
    spectrogram : torch.Tensor, (seq_size, fbank)
    transcript : torch.Tensor, one-dimensional tensor of token ids
    """

    def __init__(self, idx_file_path: str = config.PREHAND_SAVE_DIR + 'prehand_data_train.idx.jsonl',
                 label_file_path: str = config.PREHAND_SAVE_DIR + 'token_list.pkl'):
        super(MASRDataset, self).__init__()
        self.idx = read_jsonl(idx_file_path)
        # Use a context manager so the file handle is closed (the original
        # pickle.load(open(...)) leaked it). NOTE: pickle is only safe on
        # trusted, locally produced files such as our own token_list.pkl.
        with open(label_file_path, 'rb') as f:
            labels = pickle.load(f)
        # token -> id lookup; labels_str keeps the id -> token ordering.
        self.labels = {token: i for i, token in enumerate(labels)}
        self.labels_str = labels

    def __getitem__(self, index):
        one_row = self.idx[index]
        wav_path, transcript = one_row['wav_path'], one_row['transcript']
        spec = spectrogram(load_wav(wav_path))
        # Map tokens to ids, skipping tokens missing from the vocabulary.
        # Filter on `is not None` rather than filter(None, ...): id 0 is
        # the CTC blank '_' and filter(None, ...) would wrongly drop it.
        ids = [self.labels.get(tok) for tok in transcript.split(' ')]
        ids = [i for i in ids if i is not None]
        return spec, torch.tensor(ids)

    def __len__(self):
        return len(self.idx)


def _collection_fn(data):
    """
    Collate a batch of (spectrogram, transcript) pairs.

    Input units:
    wav_list item: wav_tensor, (seq1_size, fbank), float
    transcript_list item: transcript_tensor, (seq2_size, ), int
    Returns:
    inputs_tensor, (batch, seq1_size, fbank)
    targets_tensor, (batch, seq2_size)
    inputs_len, (batch)
    targets_len, (batch)
    """
    specs, transcripts = zip(*data)
    # Record the true (unpadded) lengths before padding.
    spec_lengths = torch.tensor([spec.size(0) for spec in specs])
    transcript_lengths = torch.tensor([t.size(0) for t in transcripts])
    padded_specs = pad_sequence(specs, batch_first=True,
                                padding_value=PARAMETER.PADDING_ID)
    padded_transcripts = pad_sequence(transcripts, batch_first=True,
                                      padding_value=PARAMETER.PADDING_ID)
    return padded_specs, padded_transcripts, spec_lengths, transcript_lengths


class MASRDataLoader(DataLoader):
    """DataLoader that always batches with ``_collection_fn``."""

    def __init__(self, *args, **kwargs):
        # Hand the collate_fn to DataLoader.__init__ instead of assigning
        # the attribute after construction: the original let DataLoader
        # initialize with its default collate first and relied on
        # post-init attribute assignment, which is fragile and bypasses
        # DataLoader's own argument handling. Any caller-supplied
        # collate_fn is overridden, matching the original behavior.
        kwargs['collate_fn'] = _collection_fn
        super(MASRDataLoader, self).__init__(*args, **kwargs)


if __name__ == '__main__':
    # Smoke test: iterate one DataLoader built from the default train split
    # and print the transcript lengths of each batch.
    dataset = MASRDataset()
    loader = MASRDataLoader(dataset, batch_size=12)
    for inputs, targets, input_lens, target_lens in loader:
        print(target_lens)
