import torch
import torchaudio
from torch import nn
import json
from dataset.vocab import Vocab, tokenizer
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import os

root_path = "./data"

# ======================= Text data ============================
# Pull the human utterance text out of each conversation in the
# training manifest; order here defines the positional pairing with audio.
data = []
with open(os.path.join(root_path, "train.jsonl"), "r", encoding="utf-8") as f:
    for line in f:
        obj = json.loads(line)
        data.append(obj['conversation'][0]['human'])

# Character-level tokenization, then map every utterance to index tensors.
tokens = tokenizer(data, mode="char")
vocab = Vocab(tokens, retired_tokens=['<blank>'])
token_idx = [torch.tensor(vocab.to_idx(line)) for line in tokens]


def get_vocab():
    """Return the module-level vocabulary built from the training manifest."""
    return vocab


class SpeechDataset(Dataset):
    """Dataset pairing audio mel-spectrogram features with token-index targets.

    Each item is ``(mel_spec, token_idx)`` where ``mel_spec`` has shape
    ``(frames, n_mels)`` and ``token_idx`` is a 1-D tensor of vocabulary
    indices. Audio and text are paired positionally, so ``audio_paths[i]``
    must correspond to ``token_idx[i]``.
    """

    def __init__(self, audio_paths, token_idx,
                 sample_rate=16000, n_fft=400, hop_length=160, n_mels=80):
        """
        Args:
            audio_paths: filenames under ``<root_path>/audio``.
            token_idx: list of 1-D LongTensors, one per audio file.
            sample_rate: target sampling rate every waveform is unified to.
            n_fft: FFT window size for the spectrogram.
            hop_length: frame shift in samples (default win_length // 2).
            n_mels: number of mel filterbank channels.
        """
        super().__init__()
        self.audio_paths = audio_paths
        self.token_idx = token_idx
        self.sample_rate = sample_rate
        # Build the feature extractor ONCE here; the original code rebuilt it
        # on every __getitem__ call, which is pure per-sample overhead.
        self.mel_transform = torchaudio.transforms.MelSpectrogram(
            sample_rate=sample_rate,
            n_fft=n_fft,
            hop_length=hop_length,
            n_mels=n_mels,
        )

    def __len__(self):
        return len(self.audio_paths)

    def __getitem__(self, index):
        path = self.audio_paths[index]
        waveform, sample_rate = torchaudio.load(os.path.join(root_path, "audio", path))

        if waveform.numel() == 0:
            # Guard against empty/corrupt files: substitute one second of
            # silence at the target rate. Also overwrite sample_rate so the
            # resample branch below does not stretch the silence (the original
            # code could turn 1 s of 16 kHz zeros into 2 s if the file
            # reported 8 kHz).
            waveform = torch.zeros((1, self.sample_rate))
            sample_rate = self.sample_rate

        # Unify the sampling rate before feature extraction.
        if sample_rate != self.sample_rate:
            resampler = torchaudio.transforms.Resample(sample_rate, self.sample_rate)
            waveform = resampler(waveform)

        # (1, n_mels, frames) -> (frames, n_mels)
        mel_spec = self.mel_transform(waveform).squeeze(0).transpose(0, 1)
        return mel_spec, self.token_idx[index]


def collate_fn(batch):
    """Collate a list of (mel_spec, token_idx) pairs into padded batch tensors.

    Returns:
        (mel_spec, token_idx, mel_spec_valid_lens, token_idx_valid_lens):
        zero-padded feature and target batches (batch-first) plus the
        unpadded length of every item in each.
    """
    features, targets = zip(*batch)
    # Record true lengths before padding so downstream masking can ignore pads.
    feature_lens = torch.tensor([item.shape[0] for item in features])
    target_lens = torch.tensor([item.shape[0] for item in targets])
    padded_features = pad_sequence(features, batch_first=True, padding_value=0)
    padded_targets = pad_sequence(targets, batch_first=True, padding_value=0)
    return padded_features, padded_targets, feature_lens, target_lens


def generate_loader(batch_size=20):
    """Build a shuffled DataLoader over the files in ``<root_path>/audio``.

    Args:
        batch_size: number of samples per batch (default 20).

    Returns:
        DataLoader yielding (mel_spec, token_idx, mel_spec_valid_lens,
        token_idx_valid_lens) batches via ``collate_fn``.
    """
    # os.listdir returns names in arbitrary, filesystem-dependent order,
    # but token_idx is paired with audio files POSITIONALLY from the
    # line order of train.jsonl. Sorting makes the pairing deterministic.
    # NOTE(review): confirm the sorted filename order actually matches the
    # manifest line order — otherwise labels are silently misaligned.
    audio_paths = sorted(os.listdir(os.path.join(root_path, "audio")))
    dataset = SpeechDataset(audio_paths, token_idx)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)

# if __name__ == '__main__':
#     loader = generate_loader()
#     for mel_spec, token_idx, mel_spec_valid_lens, token_idx_valid_lens in loader:
#         print(mel_spec.shape)  # (20, 835, 80) # 音频特征数据
#         print(token_idx.shape)  # (20,21) # 文本特征数据
#         break
