from torch.utils.data import Dataset, DataLoader
from CSBSmodel import CSBSModel
import data_processing
import os
import torch
import torchaudio

# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Fixed number of time frames per item: __getitem__ zero-pads or truncates
# every clip (and its labels) to this length so batches stack.
# (The original comment said "32 frames", but the value actually used is 24.)
MAX_FRAMES = 24

class DMERdataset(Dataset):
    """Music-emotion dataset yielding ``(frames, label)`` pairs.

    For each audio file the item is built as follows:
      * load the mp3, resample to ``target_sample_rate`` if needed, skip the
        first 15 seconds, downmix to mono, peak-normalize, frame/window the
        waveform via ``data_processing``, and optionally apply ``transform``
        (e.g. an MFCC module) to the windowed frames;
      * take the per-frame arousal ("A") or valence ("V") column of the
        annotation matrix as the label, shaped ``(MAX_FRAMES, 1)``.

    Both frames and labels are zero-padded / truncated to ``MAX_FRAMES``
    time steps so items can be batched.

    NOTE(review): all tensors are created on ``DEVICE`` inside
    ``__getitem__``; with ``DataLoader(num_workers>0)`` CUDA tensors built in
    worker processes are problematic — confirm the loaders stay
    single-process, or move device transfers into the training loop.
    """

    def __init__(self, label_path, audio_dir, transform=None, target_transform=None,
                 target_sample_rate=44100, label_type="A"):
        self.label_path = label_path
        self.audio_dir = audio_dir
        self.transform = transform                 # applied to windowed frames
        self.target_transform = target_transform   # applied to raw VA labels
        self.target_sample_rate = target_sample_rate
        # read_label returns per-song VA matrices plus the matching song ids
        # (used as mp3 file stems under audio_dir).
        self.va_matrix, self.music_id = data_processing.read_label(self.label_path)
        self.label_type = label_type  # "A" (arousal) or "V" (valence)
        # Cache one Resample module per encountered source rate instead of
        # rebuilding it on every __getitem__ call.
        self._resamplers = {}

    def __len__(self):
        return len(self.music_id)

    def __getitem__(self, idx):
        audio_path = os.path.join(self.audio_dir, f"{self.music_id[idx]}.mp3")
        va_vals = self.va_matrix[idx]  # shape: [frame_nums, 2]
        va_vals = torch.tensor(va_vals, dtype=torch.float32, device=DEVICE)
        waveform, sample_rate = torchaudio.load(audio_path, format="mp3")
        waveform = waveform.to(DEVICE)
        if sample_rate != self.target_sample_rate:
            resampler = self._resamplers.get(sample_rate)
            if resampler is None:
                resampler = torchaudio.transforms.Resample(
                    orig_freq=sample_rate,
                    new_freq=self.target_sample_rate
                ).to(DEVICE)
                self._resamplers[sample_rate] = resampler
            waveform = resampler(waveform)
        # Skip the first 15 seconds (presumably so audio lines up with the
        # annotations — confirm against the dataset spec).
        waveform = waveform[:, self.target_sample_rate * 15:]
        # Downmix multi-channel audio to mono.
        if waveform.shape[0] > 1:
            waveform = torch.mean(waveform, dim=0, keepdim=True)
        # Peak-normalize; guard against all-zero (silent) audio, which
        # previously produced NaNs via division by zero.
        peak = waveform.abs().max()
        if peak > 0:
            waveform = waveform / peak
        frames = data_processing.frame_audio(waveform, frame_length=22050, frame_step=22050).to(DEVICE)
        frames_windowed = data_processing.windows(frames, 22050).to(DEVICE)
        if self.transform:
            # Module.to moves parameters in place — no need to rebind
            # self.transform (mutating self in __getitem__ would not persist
            # across DataLoader worker processes anyway).
            frames_windowed = self.transform.to(DEVICE)(frames_windowed)
        if self.target_transform:
            va_vals = self.target_transform(va_vals)
        # Select the requested annotation column.
        if self.label_type == "A":
            label = va_vals[:, 0]  # arousal only
        elif self.label_type == "V":
            label = va_vals[:, 1]  # valence only
        else:
            raise ValueError("label_type must be 'A' or 'V'")
        label = label.view(-1, 1)  # ensure 2-D: (frames, 1)
        # Align label count with the audio frame count (labels may overrun
        # when the audio is shorter than the annotation).
        if frames_windowed.shape[1] < label.shape[0]:
            label = label[:frames_windowed.shape[1]]
        if frames_windowed.shape[1] != label.shape[0]:
            raise Exception("音频帧数与标签数量不等")
        # Zero-pad or truncate both tensors to exactly MAX_FRAMES time steps.
        T = frames_windowed.shape[1]
        if T < MAX_FRAMES:
            pad = MAX_FRAMES - T
            pad_tensor = torch.zeros(frames_windowed.shape[0], pad, frames_windowed.shape[2], frames_windowed.shape[3], device=frames_windowed.device)
            frames_windowed = torch.cat([frames_windowed, pad_tensor], dim=1)
            label = torch.cat([label, torch.zeros(pad, 1, device=label.device)], dim=0)
        elif T > MAX_FRAMES:
            frames_windowed = frames_windowed[:, :MAX_FRAMES, :, :]
            label = label[:MAX_FRAMES]
        return frames_windowed, label

# Usage example.
if __name__ == '__main__':
    transform_mfcc = torchaudio.transforms.MFCC(
        sample_rate=44100,
        n_mfcc=30,
        log_mels=True,
        melkwargs={"n_mels": 128, "n_fft": 2048, "hop_length": 512}
    ).to(DEVICE)
    # Raw strings: the original literals relied on invalid escape sequences
    # ("\L", "\d", ...) that raise SyntaxWarning on modern Python; r"" keeps
    # every backslash literally and yields the exact same paths.
    LABEL_PATH = r"D:\Learning materials\Yan2\声光智能控制\GA-BP2\DMER2_0\datasets\train\dmer_annotations(std).csv"
    AUDIO_DIR = r"D:\Learning materials\Yan2\声光智能控制\GA-BP2\DMER2_0\datasets\train\chorus"
    dataset_A = DMERdataset(LABEL_PATH,
                            AUDIO_DIR,
                            transform=transform_mfcc,
                            target_sample_rate=44100,
                            label_type="A")
    dataset_V = DMERdataset(LABEL_PATH,
                            AUDIO_DIR,
                            transform=transform_mfcc,
                            target_sample_rate=44100,
                            label_type="V")
    loader_A = DataLoader(dataset_A, batch_size=1)
    loader_V = DataLoader(dataset_V, batch_size=1)
    # Print one batch's label shape from each loader as a smoke test.
    for frames, label in loader_A:
        print("Arousal标签 shape:", label.shape)
        break
    for frames, label in loader_V:
        print("Valence标签 shape:", label.shape)
        break
