import os
import torch
import torchaudio
import logging

from torch.utils.data import Dataset

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def getXY(tr_dir, cv_dir, num_spk=2):
    """
    Build mixture/source wav path lists for the training and cross-validation
    sets (only applies to a specific data directory layout).

    Each root directory must contain a "mix" subdirectory with the mixture
    wavs and "s1" .. "s{num_spk}" subdirectories holding the per-speaker
    sources under the same file names as the mixtures.

    Args:
        tr_dir: root directory of the training set.
        cv_dir: root directory of the cross-validation set.
        num_spk: number of speakers, i.e. source subdirectories s1..sN.

    Returns:
        Tuple (x_train, y_train, x_cv, y_cv): each x_* is a list of mixture
        paths; each y_* is a list of num_spk sub-lists of source paths,
        index-aligned with the corresponding x_*.
    """

    def collect(root):
        # sorted() makes the x/y pairing deterministic across runs and
        # filesystems (os.listdir order is arbitrary).
        files = sorted(
            f for f in os.listdir(os.path.join(root, "mix")) if f.endswith(".wav")
        )
        x = [os.path.join(root, "mix", f) for f in files]
        y = [
            [os.path.join(root, f"s{i}", f) for f in files]
            for i in range(1, num_spk + 1)
        ]
        return x, y

    x_train, y_train = collect(tr_dir)
    x_cv, y_cv = collect(cv_dir)
    logging.getLogger(__name__).info(
        f" train: {len(x_train)} , cross validation: {len(x_cv)}"
    )
    return x_train, y_train, x_cv, y_cv

def getTestSet(tr_dir, num_spk=2):
    """
    Build mixture/source wav path lists for a single (test) set.

    Expects the same layout as getXY: `tr_dir` contains a "mix" subdirectory
    with the mixture wavs and "s1" .. "s{num_spk}" subdirectories with the
    per-speaker sources under the same file names.

    Args:
        tr_dir: root directory of the data set.
        num_spk: number of speakers, i.e. source subdirectories s1..sN.

    Returns:
        Tuple (x, y): x is a list of mixture paths; y is a list of num_spk
        sub-lists of source paths, index-aligned with x.
    """
    # sorted() makes the x/y pairing deterministic (os.listdir order is
    # arbitrary).
    files = sorted(
        f for f in os.listdir(os.path.join(tr_dir, "mix")) if f.endswith(".wav")
    )
    x = [os.path.join(tr_dir, "mix", f) for f in files]
    y = [
        [os.path.join(tr_dir, f"s{i}", f) for f in files]
        for i in range(1, num_spk + 1)
    ]
    return x, y


class AudioData(Dataset):
    """Dataset pairing mixture wavs (x) with per-speaker source wavs (y).

    Each item is (mixture, sources): mixture has shape (channels, length) and
    sources has shape (num_spk, length). Signals are truncated or zero-padded
    to `length` samples; pass length=-1 or None to keep the native length
    (sources are then assumed to share the first source's length — TODO
    confirm all sources are equally long in that mode).
    """

    def __init__(self, x, y, device, num_spk=2, length=32000, frequency=8000):
        # x: list of mixture wav paths, one per item.
        # y: list of num_spk sub-lists of source wav paths, index-aligned
        #    with x (e.g. [2, 5000] for 2 speakers and 5000 items).
        self.x = x
        self.y = y
        self.device = device
        self.length = length  # target sample count; -1/None keeps native length
        self.num_spk = num_spk
        self.frequency = frequency  # expected sample rate of every wav
        assert num_spk == len(
            y
        ), "the num speaker should match the first dimension of y"

    def __len__(self):
        return len(self.y[0])

    def _fit(self, wav):
        """Truncate or zero-pad a (channels, n) tensor to self.length samples."""
        if wav.size(1) >= self.length:
            return wav[:, : self.length]
        padded = torch.zeros(wav.size(0), self.length).to(self.device)
        padded[:, : wav.size(1)] = wav
        return padded

    def _load(self, path):
        """Load a wav, verify its sample rate, and move it to the device."""
        # torchaudio.load returns (Tensor, sample_rate) — the original code
        # forgot to unpack this in the variable-length branch.
        wav, frequency = torchaudio.load(path)
        assert (
            frequency == self.frequency
        ), "the output frequency does not align with the data frequency"
        return wav.to(self.device)

    def __getitem__(self, idx):
        mixture, frequency = torchaudio.load(self.x[idx])
        mixture = mixture.to(self.device)
        assert (
            frequency == self.frequency
        ), "the input frequency does not align with the data frequency"

        # BUGFIX: check for variable-length mode BEFORE truncating/padding.
        # Previously length=-1 truncated the mixture to input[:, :-1] first.
        if self.length is None or self.length == -1:
            sources = [self._load(self.y[i][idx]) for i in range(self.num_spk)]
            output = torch.zeros(self.num_spk, sources[0].size(-1)).to(self.device)
            for spk, src in enumerate(sources):  # new name: don't shadow idx
                output[spk] = src[0]
            return mixture, output

        mixture = self._fit(mixture)
        output = torch.zeros(self.num_spk, self.length).to(self.device)
        for i in range(self.num_spk):
            output[i] = self._fit(self._load(self.y[i][idx]))[0]
        return mixture, output


if __name__ == "__main__":
    # Smoke test: load one mixture and its two per-speaker sources.
    device = "cpu"
    x = ["sm.wav"]
    # BUGFIX: y is indexed [speaker][item], so with the default num_spk=2 it
    # needs one sub-list per speaker. The old value [["s1.wav", "s2.wav"]]
    # had len(y) == 1 and tripped the constructor's num_spk assertion.
    y = [["s1.wav"], ["s2.wav"]]
    audioData = AudioData(x, y, device)
    input, output = audioData[0]
    print(input.shape)
    print(output.shape)
