import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
import soundfile as sf
import IPython.display as ipd


def get_padding_batch_data(batch, normalize=True):
    """Collate a batch of samples into padded, batch-first tensors.

    Each sample is a dict with keys 'mel_spec' (freq x time array, transposed
    here to time x freq), 'phones', 'pitches' and 'energies' (per-frame
    sequences of possibly different lengths).

    Args:
        batch: list of sample dicts as described above.
        normalize: if truthy, divide phonemes/pitches/energies by their
            batch-wide max so values fall in [0, 1].

    Returns:
        Tuple of (padded_mel_spectrograms, padded_phonemes, padded_pitches,
        padded_energies, lengths, mask) where `lengths` holds the unpadded
        phone-sequence lengths and `mask` is True at non-zero phoneme slots.
    """
    mel_spec = [torch.tensor(sample['mel_spec']).T for sample in batch]
    phones = [torch.tensor(sample['phones']) for sample in batch]
    pitches = [torch.tensor(sample['pitches']) for sample in batch]
    energies = [torch.tensor(sample['energies']) for sample in batch]

    padded_mel_spectrograms = pad_sequence(mel_spec, batch_first=True, padding_value=0).float()
    padded_phonemes = pad_sequence(phones, batch_first=True, padding_value=0).float()
    padded_pitches = pad_sequence(pitches, batch_first=True, padding_value=0).float()
    padded_energies = pad_sequence(energies, batch_first=True, padding_value=0).float()

    if normalize:
        # Guard against an all-zero batch (e.g. fully unvoiced pitch):
        # dividing by a zero max would produce NaNs that poison training.
        for tensor in (padded_phonemes, padded_pitches, padded_energies):
            peak = torch.max(tensor)
            if peak != 0:
                tensor /= peak

    lengths = torch.tensor([len(seq) for seq in phones])

    # Mask of real (non-padding) phoneme positions; padding_value=0 means
    # zero entries are treated as padding. Normalization preserves zeros,
    # so computing the mask afterwards is safe.
    mask = (padded_phonemes != 0)
    return padded_mel_spectrograms, padded_phonemes, padded_pitches, padded_energies, lengths, mask


def mel_spec_2_wav(mel_spec, config, wav_name='generated_audio.wav'):
    """Save an audio tensor to a WAV file and return a playable widget.

    Args:
        mel_spec: audio tensor (any singleton dims are squeezed out);
            NOTE(review): despite the name, this is written directly as
            waveform samples — confirm callers pass audio, not a spectrogram.
        config: object providing `sampling_rate` for the output file.
        wav_name: output WAV path.

    Returns:
        IPython.display.Audio widget for the written file.
    """
    output_np = mel_spec.squeeze().detach().cpu().numpy()
    # Save the audio data as a WAV file.
    sf.write(wav_name, output_np, config.sampling_rate)
    # Play back the file we just wrote (was hard-coded to
    # 'generated_audio.wav', which broke custom wav_name arguments).
    return ipd.Audio(wav_name)






