import os
import time
from pathlib import Path
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import BatchSampler
from torch.nn.utils.rnn import pad_sequence
from functools import lru_cache
import json
import torchaudio
import random


class Emilia(Dataset):
    """Speech dataset over Emilia-style (mp3, json) file pairs.

    Each sample is an mp3 audio file with a sibling .json metadata file
    containing at least the keys "text" and "duration".
    """

    def __init__(self, data_dir, sample_rate=16000, files=None):
        """
        Args:
            data_dir: Root directory containing ZH-B* subdirectories.
                Accepts str or Path (the original crashed on str because
                it called .glob() without converting).
            sample_rate: Target sample rate; audio is resampled on load.
            files: Optional pre-built list of (mp3_path, json_path) pairs
                (e.g. from split_dataset); skips directory scanning.
        """
        self.data_dir = Path(data_dir)
        self.sample_rate = sample_rate
        if files is not None:
            self.files = files
        else:
            # Scan subdirectories for annotated audio.
            self.files = self._scan(self.data_dir)

    @staticmethod
    def _scan(data_dir):
        """Collect (mp3, json) pairs under data_dir/ZH-B*.

        Only mp3 files with a matching .json annotation are kept;
        orphan mp3 files are silently skipped.
        """
        pairs = []
        for subdir in data_dir.glob("ZH-B*"):
            for mp3_path in subdir.glob("*.mp3"):
                json_path = mp3_path.with_suffix(".json")
                if json_path.exists():
                    pairs.append((mp3_path, json_path))
        return pairs

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        """Load one sample: resampled waveform plus its text annotation.

        Returns:
            dict with "audio" ([channels, samples] tensor), "text" (str),
            and "duration" (from the json metadata).
        """
        mp3_path, json_path = self.files[idx]
        waveform, sr = torchaudio.load(mp3_path)
        # Resample to the configured rate if needed.
        if sr != self.sample_rate:
            waveform = torchaudio.functional.resample(waveform, sr, self.sample_rate)

        # Load the text annotation.
        with open(json_path, "r", encoding="utf-8") as f:
            metadata = json.load(f)

        return {
            "audio": waveform,
            "text": metadata["text"],
            "duration": metadata["duration"],
        }

    @staticmethod
    def split_dataset(data_dir, train_ratio=0.8, val_ratio=0.1, test_ratio=0.1, seed=42):
        """Reproducibly split the scanned file list into train/val/test.

        Files are sorted before shuffling with a fixed seed, so the split
        is identical across runs. A local RNG is used instead of seeding
        the global `random` module, to avoid mutating shared state (the
        shuffle result is the same for a given seed). test_ratio is the
        implicit remainder; kept as a parameter for interface stability.

        Returns:
            (train_files, val_files, test_files) lists of path pairs.
        """
        files = sorted(Emilia._scan(Path(data_dir)))  # ensure stable order
        random.Random(seed).shuffle(files)
        n = len(files)
        n_train = int(n * train_ratio)
        n_val = int(n * val_ratio)
        train_files = files[:n_train]
        val_files = files[n_train:n_train + n_val]
        test_files = files[n_train + n_val:]
        return train_files, val_files, test_files


def collate_fn(batch):
    """Collate variable-length audio samples into one padded batch.

    Returns a dict with:
        audio:    [batch, 1, max_len] zero-padded mono waveforms
        lengths:  [batch] original sample counts before padding
        text:     list of transcript strings
        duration: [batch] tensor of metadata durations
    """
    # Drop the channel dimension so pad_sequence sees 1-D waveforms.
    mono_waves = [sample["audio"].squeeze(0) for sample in batch]

    # Pre-padding lengths, needed downstream to mask the padded tail.
    lengths = torch.tensor([wave.size(0) for wave in mono_waves])

    # Zero-pad to the longest waveform, then restore the channel axis.
    padded = pad_sequence(mono_waves, batch_first=True)

    return {
        "audio": padded.unsqueeze(1),  # [batch, 1, max_length]
        "lengths": lengths,
        "text": [sample["text"] for sample in batch],
        "duration": torch.tensor([sample["duration"] for sample in batch]),
    }


# Dynamically sizes each batch based on total audio length
class DynamicBatchSampler(BatchSampler):
    """Batch sampler that packs samples up to a total frame budget.

    Indices are grouped so the summed number of audio samples per batch
    stays at or below max_frames, giving roughly constant batch cost for
    variable-length audio.

    NOTE(review): measuring length via self.dataset[idx] loads and
    resamples the full audio file for every index each epoch, which is
    expensive; consider using the json "duration" metadata instead.
    """

    def __init__(self, dataset, max_frames=160000, shuffle=True):
        """
        Args:
            dataset: Dataset whose items expose item["audio"].size(1).
            max_frames: Max total samples (frames) per batch.
            shuffle: Randomize index order each epoch.
        """
        # BatchSampler.__init__ is deliberately not called: batches are
        # formed dynamically, so sampler/batch_size/drop_last don't apply.
        self.dataset = dataset
        self.max_frames = max_frames
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            order = torch.randperm(len(self.dataset))
        else:
            order = torch.arange(len(self.dataset))
        batch = []
        current_frames = 0
        for idx in order.tolist():  # plain ints, not 0-dim tensors
            audio_length = self.dataset[idx]["audio"].size(1)
            # Flush before exceeding the budget. The `batch` guard fixes a
            # bug in the original: a clip longer than max_frames at the
            # start of an epoch yielded an empty batch, which would crash
            # pad_sequence in the collate function.
            if batch and current_frames + audio_length > self.max_frames:
                yield batch
                batch = []
                current_frames = 0
            batch.append(idx)
            current_frames += audio_length
        if batch:
            yield batch

    def __len__(self):
        """Rough batch-count estimate assuming ~10 s average clips."""
        avg_len = 10 * self.dataset.sample_rate
        total_frames = len(self.dataset) * avg_len
        return max(1, int(total_frames // self.max_frames))


if __name__ == "__main__":
    # Time the dataset construction (directory scan).
    start = time.time()
    data_dir = Path("./data/Emilia-YODAS/ZH")
    batch_time = 64  # seconds of audio per batch
    sample_rate = 16000
    dataset = Emilia(data_dir=data_dir, sample_rate=sample_rate)
    end = time.time()
    print(f"duration:{end-start}")
    print(len(dataset.files))

    # Inspect a single sample.
    sample = dataset[0]
    print(sample["text"])
    print(sample["duration"])
    print(sample["audio"].shape)

    # Reproducible train/val/test split.
    train_files, val_files, test_files = Emilia.split_dataset(
        data_dir, train_ratio=0.8, val_ratio=0.1, test_ratio=0.1, seed=42
    )
    max_frames = batch_time * sample_rate

    def _make_loader(files, shuffle, workers):
        # Wrap a file subset in a frame-budgeted DataLoader.
        subset = Emilia(data_dir=data_dir, sample_rate=sample_rate, files=files)
        return DataLoader(
            subset,
            batch_sampler=DynamicBatchSampler(subset, max_frames=max_frames, shuffle=shuffle),
            num_workers=workers,
            collate_fn=collate_fn,
        )

    loaders = [
        ("[Train] 批次音频形状:", _make_loader(train_files, True, 4)),
        ("[Val] 批次音频形状:", _make_loader(val_files, False, 2)),
        ("[Test] 批次音频形状:", _make_loader(test_files, False, 2)),
    ]

    # Sanity-check one batch from each loader.
    for label, loader in loaders:
        for batch in loader:
            print(label, batch["audio"].shape)
            print("音频实际长度:", batch["lengths"][:5])
            print("前5个文本:", batch["text"][:5])
            break
