import pickle
from typing import Union, Dict
import random
from pathlib import Path

import librosa
from torch.utils.data import Dataset
import torchaudio
import torch
import torch.nn.functional as F
import numpy as np



from utils.load_scp import get_source_list


def read_2column_text(path: Union[Path, str]) -> Dict[str, str]:
    """Parse a Kaldi-style 2-column text file into a key -> value dict.

    Each line is split on the first whitespace run; a line with no second
    column maps its key to the empty string. Duplicate keys are an error.

    Examples:
        wav.scp:
            key1 /some/path/a.wav
            key2 /some/path/b.wav

        >>> read_2column_text('wav.scp')
        {'key1': '/some/path/a.wav', 'key2': '/some/path/b.wav'}
    """
    table: Dict[str, str] = {}
    with Path(path).open("r", encoding="utf-8") as fin:
        for lineno, raw in enumerate(fin, 1):
            parts = raw.rstrip().split(maxsplit=1)
            key = parts[0] if parts else ""
            value = parts[1] if len(parts) > 1 else ""
            if key in table:
                raise RuntimeError(f"{key} is duplicated ({path}:{lineno})")
            table[key] = value
    return table

def normalize(audio):
    """Peak-normalize *audio* so its largest absolute sample is ~1.

    The 1e-8 epsilon keeps all-zero (silent) input from dividing by zero;
    silence simply stays all-zero.
    """
    peak = np.abs(audio).max()
    scale = 1 / (peak + 1e-8)
    return audio * scale



def truc_wav(*x: torch.Tensor, length=None):
    """
    Chunk (or zero-pad) a group of equal-length tensors to ``length`` samples.

    All tensors are cut with the SAME random offset so they stay aligned
    (e.g. a mixture and its clean reference).

    Args:
        x: one or more tensors of shape [T] or [T, *]; all are assumed to
            share the same first-dimension length T.
        length: target first-dimension length. ``None`` returns the inputs
            unchanged.

    Returns:
        A single tensor when one input is given, otherwise a list of
        tensors; each has first dimension ``length`` when ``length`` is set.
    """
    x_len = x[0].size(0)  # [T]
    if length is None:
        # Nothing to do; preserve the single-tensor/list return convention.
        return x[0] if len(x) == 1 else list(x)
    res = []
    if x_len > length:
        # Last valid start is x_len - length (inclusive); randint is
        # inclusive on both ends, so this lets the final chunk be picked.
        offset = random.randint(0, x_len - length)
        for a in x:
            res.append(a[offset : offset + length])
    else:
        for a in x:
            # F.pad's pad list runs from the LAST dim backwards, so emit
            # (0, 0) pairs for every trailing dim and extend only dim 0.
            padding = [0] * (a.dim() - 1) * 2 + [
                0,
                length - a.size(0),
            ]
            res.append(F.pad(a, padding, "constant"))
    return res[0] if len(res) == 1 else res


class TargetDataset(Dataset):
    def __init__(
        self,
        mix_path: str,
        regi_path: str,
        clean_path: str,
        sr: int,
        mix_ds=4,
        regi_ds=4,
    ):
        """
        Regular (pre-mixed) dataset for target speaker extraction.

        The three .scp files must share the same utterance keys so that each
        mixture can be paired with its enrollment (regi) and clean reference.

        Args:
            mix_path: .scp file listing mixture wavs.
            regi_path: .scp file listing enrollment wavs.
            clean_path: .scp file listing clean reference wavs.
            sr: sample rate of the wavs.
            mix_ds: mixture chunk duration in seconds.
            regi_ds: enrollment chunk duration in seconds.
        """
        self.mix_key, self.mix_list = get_source_list(mix_path, ret_name=True)
        self.regi_key, self.regi_list = get_source_list(regi_path, ret_name=True)
        self.clean_key, self.clean_list = get_source_list(clean_path, ret_name=True)
        # Precompute name -> index maps: list.index() is O(n) and was being
        # run twice for every __getitem__ call.
        self._regi_idx = {k: i for i, k in enumerate(self.regi_key)}
        self._clean_idx = {k: i for i, k in enumerate(self.clean_key)}
        self.mix_length = int(sr * mix_ds)
        self.regi_length = int(sr * regi_ds)

    def __len__(self):
        return len(self.mix_list)

    def __getitem__(self, idx):
        name = self.mix_key[idx]
        mix_path = self.mix_list[idx]
        regi_path = self.regi_list[self._regi_idx[name]]
        clean_path = self.clean_list[self._clean_idx[name]]
        mix = torchaudio.load(mix_path)[0].squeeze(0)  # [T]
        regi = torchaudio.load(regi_path)[0].squeeze(0)  # [T]
        clean = torchaudio.load(clean_path)[0].squeeze(0)  # [T]
        # Mixture and clean must share one random offset to stay aligned;
        # the enrollment utterance is chunked independently.
        mix, clean = truc_wav(mix, clean, length=self.mix_length)
        regi = truc_wav(regi, length=self.regi_length)
        return mix, clean, regi, mix_path, clean_path, regi_path


class TargetDMDataset(Dataset):
    def __init__(
        self,
        spk_dict: str,
        clean_path: str,
        sr: int,
        mix_ds=4,
        regi_ds=4,
        snr = 5
    ):
        """
        Dynamic-mixing dataset for target speaker extraction: each item mixes
        a clean utterance with a randomly chosen interfering speaker on the fly.

        Args:
            spk_dict: path to a pickle holding {speaker_id: [utterance paths]}.
                NOTE(security): pickle.load executes arbitrary code; only load
                trusted files.
            clean_path: .scp of clean utterances; keys are expected to start
                with "<spk_id>-" (verify against the data layout).
            sr: sample rate.
            mix_ds: mixture chunk duration in seconds.
            regi_ds: enrollment chunk duration in seconds.
            snr: max SNR in dB; each item draws uniformly from [0, snr).
        """
        self.clean_key, self.clean_list = get_source_list(clean_path, ret_name=True)
        with open(spk_dict, "rb") as f:
            self.spk_dict = pickle.load(f)

        self.mix_length = int(sr * mix_ds)
        self.regi_length = int(sr * regi_ds)
        self.snr = snr

    def __len__(self):
        return len(self.clean_key)

    def __getitem__(self, idx):
        name = self.clean_key[idx]
        # clean_key and clean_list are aligned, so idx indexes both directly
        # (the original re-found the index with an O(n) .index(name)).
        clean_path = self.clean_list[idx]
        clean_spk_id = name.split("-")[0]

        # Enrollment: a different utterance of the same speaker. Filtering
        # first avoids the infinite retry loop the original hit when a
        # speaker has only one utterance; in that degenerate case we fall
        # back to reusing the clean utterance itself.
        regi_candidates = [p for p in self.spk_dict[clean_spk_id] if p != clean_path]
        regi_path = random.choice(regi_candidates or self.spk_dict[clean_spk_id])
        regi, _ = librosa.load(regi_path, sr=None)

        # Interferer: any speaker other than the target.
        spk_ids = list(self.spk_dict.keys())
        intf_spk = random.choice(spk_ids)
        while intf_spk == clean_spk_id:
            intf_spk = random.choice(spk_ids)
        intf_path = random.choice(self.spk_dict[intf_spk])

        clean_audio, _ = librosa.load(clean_path, sr=None)
        intf_audio, _ = librosa.load(intf_path, sr=None)

        # Match the interferer length to the clean utterance.
        if clean_audio.shape[0] > intf_audio.shape[0]:
            # Tile the interferer until it is long enough, then trim.
            tiled = np.tile(intf_audio, len(clean_audio) // len(intf_audio) + 1)
            intf_audio = tiled[: len(clean_audio)]
        elif clean_audio.shape[0] < intf_audio.shape[0]:
            # randint is inclusive; last valid start is len(intf) - len(clean).
            offset = random.randint(0, len(intf_audio) - len(clean_audio))
            intf_audio = intf_audio[offset : offset + len(clean_audio)]
        assert intf_audio.shape == clean_audio.shape

        # Peak-normalize everything, then attenuate the interferer so the
        # mixture has a random SNR in [0, self.snr) dB.
        clean_audio, intf_audio, regi = (
            normalize(clean_audio),
            normalize(intf_audio),
            normalize(regi),
        )
        _snr = random.random() * self.snr
        intf_audio = intf_audio * 10 ** (-_snr / 20)
        mix = clean_audio + intf_audio
        assert mix.shape == clean_audio.shape

        # Chunk to training length; mix and clean share one random offset.
        mix, clean, regi = (
            torch.from_numpy(mix),
            torch.from_numpy(clean_audio),
            torch.from_numpy(regi),
        )
        mix, clean = truc_wav(mix, clean, length=self.mix_length)
        regi = truc_wav(regi, length=self.regi_length)

        # Empty string keeps the tuple shape consistent with TargetDataset,
        # which returns mix_path in this slot.
        return mix, clean, regi, "", clean_path, regi_path

