import random
import torch
import torchaudio
import os
import glob
from torch.utils.data import Dataset, DataLoader
from torch_stft import STFT
from pystct import sdct_torch, isdct_torch


class AudioProcessor:
    """
    Preprocess audios from the custom dataset.

    Every audio is cropped (or padded) to ``_limit`` samples, then the
    chosen time-frequency transform is applied:

    - ``transform='cosine'``: returns the STDCT matrix only.
    - ``transform='fourier'``: returns the STFT magnitude and phase.

    The frame length and step are chosen so that a host audio maps to a
    [1, 1024, 512] grid and a secret audio to [1, 1024, 256]
    (per the inline comments below; actual shapes depend on ``_limit``).

    Parameters
    ----------
    transform : str
        Either 'cosine' (STDCT) or 'fourier' (STFT).
    isHost : bool
        True for the host (cover) audio, False for the secret audio;
        selects the frame step that yields the intended transform width.
    """
    def __init__(self, transform : str = 'cosine', isHost : bool = True):
        # Corresponds to 1.5 seconds approximately
        self._limit = 67522 # 2 ** 16 + 2 ** 11 - 2 ** 6 + 2

        if isHost: # Host audio -> [1, 1024, 512]
            self._frame_length = 2 ** 10 if transform == 'cosine' else 2 ** 11 - 1
            self._frame_step = 130 if transform == 'cosine' else 132
        else: # Secret audio -> [1, 1024, 256]
            self._frame_length = 2 ** 10 if transform == 'cosine' else 2 ** 11 - 1
            self._frame_step = 2 ** 8 + 4 if transform == 'cosine' else 132

        self._transform = transform
        if self._transform == 'fourier':
            # Built once here and reused across forward() calls.
            self.stft = STFT(
                filter_length=self._frame_length,
                hop_length=self._frame_step,
                win_length=self._frame_length,
                window='hann'
            )

    def forward(self, audio_path):
        """
        Load ``audio_path`` and return its transform.

        Returns
        -------
        The STDCT tensor for 'cosine', or a ``(magnitude, phase)``
        tuple for 'fourier'.

        Raises
        ------
        ValueError
            If the configured transform is not implemented.
        """
        self.sound, self.sr = torchaudio.load(audio_path)

        # Keep only the first channel.
        sound = self.sound[0]
        # Low-amplitude noise buffer: short clips are padded with this
        # noise floor instead of exact zeros.
        tmp = torch.zeros([self._limit, ]).normal_(mean = 0, std = 0.005)
        if sound.numel() < self._limit:
            # Pad: copy the whole clip over the noise buffer.
            tmp[:sound.numel()] = sound[:]
        else:
            # Crop: take a random window of exactly _limit samples.
            i = random.randint(0, len(sound) - self._limit)
            tmp[:] = sound[i:i + self._limit]
        if self._transform == 'cosine':
            return sdct_torch(
                tmp.type(torch.float32),
                frame_length = self._frame_length,
                frame_step = self._frame_step
            )
        elif self._transform == 'fourier':
            magnitude, phase = self.stft.transform(tmp.unsqueeze(0).type(torch.float32))
            return magnitude, phase
        # BUG FIX: was `raise Exception(f'Transform not implemented')` — a bare
        # Exception with a placeholder-less f-string. ValueError subclasses
        # Exception, so existing `except Exception` handlers still work.
        raise ValueError(f"Transform '{self._transform}' not implemented")

# Assume torchaudio can already handle the FLAC format; if not, we can add a conversion step later.

class StegoDataset(Dataset):
    """
    Dataset pairing a host audio (FSDnoisy18k, ``.wav``) with a secret
    audio (VCTK-Corpus, ``.flac``), each already transformed by its own
    :class:`AudioProcessor`.

    Parameters
    ----------
    root_dir : str
        Root folder containing both ``FSDnoisy18k`` and
        ``VCTK-Corpus/wav48`` directories.
    folder : str
        'train' takes paths from the head of the sorted list; any other
        value takes them from the tail (a simple deterministic split).
    transform : str
        'cosine' or 'fourier', forwarded to ``AudioProcessor``.
    max_limit : int
        Maximum number of secret audios (also caps the dataset length).
    max_audio_limit : int
        Maximum number of host audios.
    """
    # max_audio_limit=17584
    def __init__(self, root_dir, folder='train', transform='cosine', max_limit=9000, max_audio_limit=9000):
        self.root_dir = root_dir
        self.folder = folder
        self.transform = transform
        self.max_limit = max_limit
        self.max_audio_limit = max_audio_limit
        self.host_audio_paths = self._get_audio_paths('FSDnoisy18k', '.wav')
        self.secret_audio_paths = self._get_audio_paths('VCTK-Corpus/wav48', '.flac')
        self.host_processor = AudioProcessor(transform=self.transform, isHost=True)
        self.secret_processor = AudioProcessor(transform=self.transform, isHost=False)

    def _get_audio_paths(self, dataset_name, extension):
        """Collect the sorted, capped audio paths for one dataset."""
        # FSDnoisy18k keeps its split in a train/test subfolder; VCTK
        # stores one folder per speaker (p225, p226, ...) at its root.
        audio_root = os.path.join(self.root_dir, dataset_name, self.folder if dataset_name == 'FSDnoisy18k' else '')
        if dataset_name == 'FSDnoisy18k':
            audio_paths = sorted(glob.glob(os.path.join(audio_root, f'*{extension}')))
            limit = self.max_audio_limit
        else:
            audio_paths = []
            # BUG FIX: the loop variable was named `dir`, shadowing the builtin.
            for speaker_dir in sorted(glob.glob(os.path.join(audio_root, 'p*'))):
                audio_paths.extend(sorted(glob.glob(os.path.join(speaker_dir, f'*{extension}'))))
            limit = self.max_limit
        # 'train' takes the head of the sorted list, any other split the tail.
        return audio_paths[:limit] if self.folder == 'train' else audio_paths[-limit:]

    def __len__(self):
        # The number of pairs is driven by the secret audios.
        return min(len(self.secret_audio_paths), self.max_limit)

    def __getitem__(self, idx):
        # Host audios are reused cyclically when there are fewer hosts
        # than secrets.
        host_audio_path = self.host_audio_paths[idx % len(self.host_audio_paths)]
        secret_audio_path = self.secret_audio_paths[idx]

        host_audio = self.host_processor.forward(host_audio_path)
        secret_audio = self.secret_processor.forward(secret_audio_path)

        return host_audio, secret_audio

def loader(root_dir : str, folder : str = 'train', batch_size : int = 1, shuffle : bool = True, num_workers : int = 4, transform : str = 'cosine') -> DataLoader:
    """Build a DataLoader over a StegoDataset rooted at ``root_dir``.

    ``folder`` selects the split ('train' or other), ``transform`` is
    forwarded to the dataset; the remaining arguments are passed
    straight to the DataLoader.
    """
    stego_dataset = StegoDataset(
        root_dir=root_dir,
        folder=folder,
        transform=transform,
    )
    return DataLoader(
        stego_dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
    )

# Usage example:
# train_loader = loader(root_dir='path/to/dataset', folder='train')
# test_loader = loader(root_dir='path/to/dataset', folder='test')

if __name__ == '__main__':

    def test_stego_dataset_and_loader():
        """Smoke test: build a train loader and sanity-check a few batches."""
        # Dataset root directory — replace with your own dataset path.
        root_dir = './data'
        batch_size = 1
        # Build the data loader under test.
        data_loader = loader(
            root_dir=root_dir,
            folder='train',
            batch_size=batch_size,
            transform='cosine',
        )

        # Walk through only the first few batches to inspect the data.
        for i, (host_audio, secret_audio) in enumerate(data_loader):
            print(f"Batch {i}, Host Audio Shape: {host_audio.shape}, Secret Audio Shape: {secret_audio.shape}")
            # Each tensor's leading dimension must equal the batch size.
            assert host_audio.shape[0] == batch_size
            assert secret_audio.shape[0] == batch_size
            if i >= 2:  # stop after batches 0..2
                break

        print("测试通过，StegoDataset 和 loader() 工作正常。")

    # Run the smoke test.
    test_stego_dataset_and_loader()