import torch
import librosa
import wave
import numpy as np
import scipy
import json
from torch.utils.data import DataLoader
from torch.utils.data import Dataset

import wave
import os
import numpy as np
from pydub import AudioSegment

from typing import Tuple, List, Mapping
import torch
from tqdm import trange

def load_vocab(path):
    """Read a vocab file (one token per line) into a token->index mapping.

    Indices 0 and 1 are reserved for the special '[PAD]' and '[UNK]'
    tokens; file tokens get consecutive indices after them, first
    occurrence wins for duplicates.

    Args:
        path: path to the vocab file (utf-8, one token per line).

    Returns:
        dict mapping each token string to its integer index.
    """
    vocab = {'[PAD]': 0, '[UNK]': 1}
    with open(path, 'r', encoding='utf-8') as handle:
        for line in handle:
            token = line.rstrip("\n")
            vocab.setdefault(token, len(vocab))
    return vocab


def create_dataset(
                   tags: List[List[str]],
                   word_to_ix: Mapping[str, int],
                   max_seq_len: int,
                   pad_ix: int) -> torch.Tensor:
    """Convert tag sequences into a padded LongTensor of indices.

    Fixes over the previous version: the output buffer was hard-coded to
    64x64 (ignoring both the number of sequences and ``max_seq_len``),
    and sequences longer than the buffer raised an IndexError — they are
    now truncated to ``max_seq_len``.

    Args:
        tags: list of tag sequences (one list of tag strings per example).
        word_to_ix: tag -> index mapping; must contain '[UNK]'.
        max_seq_len: width of the output; longer sequences are truncated.
        pad_ix: index used for positions past the end of each sequence.

    Returns:
        LongTensor of shape [len(tags), max_seq_len].
    """
    unk_ix = word_to_ix['[UNK]']
    tags_tensor = torch.full((len(tags), max_seq_len), pad_ix, dtype=torch.long)
    for i, seq in enumerate(tags):
        for j, tag in enumerate(seq[:max_seq_len]):
            tags_tensor[i, j] = word_to_ix.get(tag, unk_ix)
    return tags_tensor


def create_attention_mask(raw_mask: torch.Tensor) -> torch.Tensor:
    """Turn a 0/1 keep-mask into an additive attention mask.

    Kept positions (mask == 1) map to 0.0 and masked positions to
    -10000.0; two singleton axes are inserted after the batch dimension
    so the result broadcasts over attention heads and query positions.
    """
    expanded = raw_mask[:, None, None]
    additive = (1.0 - expanded) * -10000.0
    return additive.float()


def create_transformer_attention_mask(raw_mask: torch.Tensor) -> torch.Tensor:
    """Invert a 0/1 keep-mask into the boolean padding mask used by
    torch.nn.Transformer, where True marks positions to be ignored."""
    flipped = 1 - raw_mask
    return flipped.bool()









# STFT / feature-extraction configuration shared by load_audio and
# spectrogram below.
sample_rate = 16000  # assumed sampling rate (Hz) of the decoded audio — TODO confirm clips are 16 kHz
window_size = 0.02  # analysis window length in seconds (20 ms)
window_stride = 0.01  # hop between successive windows in seconds (10 ms)
n_fft = int(sample_rate * window_size)  # FFT size in samples (320)
win_length = n_fft  # window length in samples, same as FFT size
hop_length = int(sample_rate * window_stride)  # hop size in samples (160)
window = "hamming"  # window-function name passed to librosa.stft


def load_audio(wav_path, normalize=True):
    """Decode an mp3 file to a 1-D float sample array.

    Args:
        wav_path: path to the audio clip (decoded via pydub's mp3 reader).
        normalize: when True, standardize the waveform to zero mean and
            unit variance.

    Returns:
        numpy float array of the (optionally normalized) samples.
    """
    raw_bytes = AudioSegment.from_mp3(wav_path).raw_data
    samples = np.frombuffer(raw_bytes, dtype="int16").astype("float")
    if not normalize:
        return samples
    return (samples - samples.mean()) / samples.std()


def spectrogram(wav, normalize=True):
    """Compute a log-magnitude STFT spectrogram of a waveform.

    Uses the module-level STFT configuration (n_fft, hop_length,
    win_length, window).

    Args:
        wav: 1-D float waveform.
        normalize: when True, standardize the spectrogram to zero mean
            and unit variance.

    Returns:
        FloatTensor of shape [freq_bins, frames].
    """
    stft = librosa.stft(
        wav,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        window=window,
    )
    magnitude, _phase = librosa.magphase(stft)
    log_spec = torch.FloatTensor(np.log1p(magnitude))
    if not normalize:
        return log_spec
    return (log_spec - log_spec.mean()) / log_spec.std()
import os

class MASRDataset(Dataset):
    """Speech dataset yielding (spectrogram, label-index list) pairs.

    The labels file is a tsv with a header row; column 1 holds the clip
    file name and column 2 the transcript. Clip names are resolved to
    full paths by scanning the given clip directories.
    """

    def __init__(self, index_path, labels_path,
                 vocab_path="/home/chenyang/PycharmProjects/masr/data_aishell/vocabs"):
        """
        Args:
            index_path: list of directories to search for the audio clips.
            labels_path: tsv file (header row, then name/transcript columns).
            vocab_path: vocab file fed to load_vocab. Defaults to the
                previously hard-coded path for backward compatibility.
        """
        with open(labels_path) as f:
            rows = f.readlines()

        self.path_name_list = []
        self.label_path_list = []
        # Skip the header row; split each row once (was split twice before).
        for row in rows[1:]:
            cols = row.split("\t")
            self.path_name_list.append(cols[1])
            # Strip the trailing newline the old code silently kept, and
            # drop spaces inside the transcript.
            self.label_path_list.append(cols[2].rstrip("\n").replace(" ", ""))

        # Resolve bare clip names to full paths; once resolved in an
        # earlier directory a name no longer matches, so the first
        # directory containing a clip wins (same as the old two loops).
        for directory in index_path:
            for name in os.listdir(directory):
                if name in self.path_name_list:
                    pos = self.path_name_list.index(name)
                    self.path_name_list[pos] = directory + "/" + name

        self.word_to_ix = load_vocab(vocab_path)

    def __getitem__(self, index):
        """Return (spectrogram, label) for the clip at ``index``."""
        wav = load_audio(self.path_name_list[index])
        spect = spectrogram(wav)
        pad_ix = self.word_to_ix["[PAD]"]
        label = [float(self.word_to_ix.get(ch, pad_ix))
                 for ch in self.label_path_list[index]]
        # Drop 0.0 entries, i.e. characters that fell back to '[PAD]';
        # this matches the original filter(None, ...) behavior.
        label = [ix for ix in label if ix]
        return spect, label

    def __len__(self):
        return len(self.path_name_list)


def _collate_fn(batch):
    def func(p):
        return p[0].size(1)

    batch = sorted(batch, key=lambda sample: sample[0].size(1), reverse=True)
    longest_sample = max(batch, key=func)[0]
    freq_size = longest_sample.size(0)
    minibatch_size = len(batch)
    max_seqlength = longest_sample.size(1)
    inputs = torch.zeros(minibatch_size, freq_size, max_seqlength)
    input_lens = torch.IntTensor(minibatch_size)
    target_lens = torch.IntTensor(minibatch_size)
    targets = []
    for x in range(minibatch_size):
        sample = batch[x]
        tensor = sample[0]
        target = sample[1]
        seq_length = tensor.size(1)
        inputs[x].narrow(1, 0, seq_length).copy_(tensor)
        input_lens[x] = seq_length
        target_lens[x] = len(target)
        targets.extend(target)

    targets = torch.IntTensor(targets)
    return inputs, targets, input_lens, target_lens


class MASRDataLoader(DataLoader):
    """DataLoader that always uses the padding collate function above.

    The collate function is injected into the kwargs *before*
    ``DataLoader.__init__`` runs: the old code assigned
    ``self.collate_fn`` after init, which is fragile because DataLoader
    consumes/validates ``collate_fn`` during initialization in newer
    PyTorch versions. Any caller-supplied ``collate_fn`` keyword is
    still overridden, exactly as before.
    """

    def __init__(self, *args, **kwargs):
        kwargs["collate_fn"] = _collate_fn
        super(MASRDataLoader, self).__init__(*args, **kwargs)

if __name__ == '__main__':
    # Smoke-test dataset construction against the local common_voice layout.
    clip_dirs = [
        "/home/chenyang/PycharmProjects/common_voice/clips1",
        "/home/chenyang/PycharmProjects/common_voice/clips2",
    ]
    tsv_path = "/home/chenyang/PycharmProjects/common_voice/common_voice_chinese_data/train.tsv"
    MASRDataset(clip_dirs, tsv_path)