import torch
import torchaudio
import os
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from collections import defaultdict

class SoundNetDataset(Dataset):
    """SoundNet training dataset: raw audio clips paired with visual
    teacher-network label distributions.

    Each item corresponds to one video and yields:
      - its waveform, zero-padded / looped to ``load_size`` samples,
      - two label tensors transposed to (label_dim, time_steps) and
        (label2_dim, time_steps), one probability distribution per frame.
    """

    def __init__(self, data_root, label_text_file, label_binary_file, label2_binary_file, label_dim, label2_dim, label_time_steps, video_frame_time, sample_rate, load_size, fine_size, mean):
        self.data_root = data_root
        self.label_text_file = label_text_file
        self.label_binary_file = label_binary_file
        self.label2_binary_file = label2_binary_file
        self.label_dim = label_dim
        self.label2_dim = label2_dim
        self.label_time_steps = label_time_steps
        self.video_frame_time = video_frame_time
        self.sample_rate = sample_rate
        self.load_size = load_size
        self.fine_size = fine_size
        self.mean = mean

        self.frames = []          # one entry per line of label_text_file
        self.videos = []          # unique video ids, in file order
        self.video_position = []  # 0-based index of each video's first frame
        self.video_lengths = []   # number of frames per video

        self.load_metadata()
        self.load_labels()

    def load_metadata(self):
        """Parse label_text_file, grouping consecutive frame rows by video.

        Frames of one video are assumed contiguous in the file; the video id
        is carved out of the frame path at fixed character offsets
        [62:-13].  NOTE(review): this is tied to one specific path layout —
        confirm against the actual frame paths.
        """
        last_video = 'first'
        # BUG FIX: frame positions must be 0-based because __getitem__ uses
        # them as Python slice starts; the original started at 1, shifting
        # every label slice by one frame.
        position = 0
        lengths = 1

        with open(self.label_text_file, 'r') as f:
            for line in f:
                split = line.strip().split()
                self.frames.append(split[0])

                cur_video = split[0][62:-13]
                if cur_video != last_video:
                    self.videos.append(cur_video)
                    self.video_position.append(position)

                    if last_video != 'first':
                        self.video_lengths.append(lengths)
                        assert lengths <= self.label_time_steps

                    last_video = cur_video
                    lengths = 1
                else:
                    lengths += 1

                position += 1

        # Flush the final video's length (no sentinel row follows it).
        # Guard so an empty metadata file does not record a phantom video.
        if last_video != 'first':
            self.video_lengths.append(lengths)
            assert lengths <= self.label_time_steps

        assert len(self.video_position) == len(self.videos)
        assert len(self.video_lengths) == len(self.videos)

    def load_labels(self):
        """Read the two raw float32 label blobs, one row per frame.

        Each row is validated to be a probability distribution (entries in
        [0, 1], rows summing to ~1 on average).
        """
        n_frames = len(self.frames)

        # np.fromfile + reshape raises if the blob size does not match the
        # frame count, which replaces the original copy_() size check.
        with open(self.label_binary_file, 'rb') as f:
            self.labels = torch.from_numpy(
                np.fromfile(f, dtype=np.float32)).reshape(n_frames, self.label_dim)

        with open(self.label2_binary_file, 'rb') as f:
            self.labels2 = torch.from_numpy(
                np.fromfile(f, dtype=np.float32)).reshape(n_frames, self.label2_dim)

        assert (self.labels >= 0).all()
        assert (self.labels <= 1).all()
        assert abs(self.labels.sum(1).mean().item() - 1) < 0.001

        assert (self.labels2 >= 0).all()
        assert (self.labels2 <= 1).all()
        assert abs(self.labels2.sum(1).mean().item() - 1) < 0.001

    def __len__(self):
        return len(self.videos)

    def __getitem__(self, idx):
        """Return (waveform, label, label2) for the idx-th video."""
        video = self.videos[idx]
        video_pos = self.video_position[idx]
        video_length = self.video_lengths[idx]
        seconds_of_video = video_length * self.video_frame_time

        data_path = os.path.join(self.data_root, video + '.mp3')
        # Renamed from `input` — do not shadow the builtin.
        audio = self.load_audio(data_path)
        label = self.labels[video_pos:video_pos + video_length]
        label2 = self.labels2[video_pos:video_pos + video_length]

        # Zero-pad the waveform up to the video's nominal duration.
        # torch.cat instead of in-place resize_(), which is invalid on view
        # tensors and exposes uninitialized memory before it is zeroed.
        target = int(seconds_of_video * self.sample_rate)
        if audio.shape[0] < target:
            audio = torch.cat([audio, audio.new_zeros(target - audio.shape[0])])

        if video_length < self.label_time_steps:
            if audio.shape[0] == 0:
                # Guard: an empty clip would make the ceiling division below
                # divide by zero.
                audio = audio.new_zeros(self.load_size)
            repeat_times = -(-self.load_size // audio.shape[0])  # ceiling division
            audio = audio.repeat(repeat_times)[:self.load_size]

            # Tile labels along time so every item spans label_time_steps.
            label = label.repeat(self.label_time_steps, 1)[:self.label_time_steps]
            label2 = label2.repeat(self.label_time_steps, 1)[:self.label_time_steps]
        else:
            audio = audio[:self.load_size]
            assert video_length == self.label_time_steps

        # (time, dim) -> (dim, time) to match the network's output layout.
        label = label.t()
        label2 = label2.t()
        # Out-of-place so a tensor shared with self.labels is never mutated.
        audio = audio - self.mean

        return audio, label, label2

    def load_audio(self, path):
        """Load an mp3 as a 1-D float tensor; silence of load_size on failure."""
        try:
            waveform, sample_rate = torchaudio.load(path)
            if waveform.shape[0] > 1:
                waveform = waveform[0, :]  # keep first channel of multi-channel audio
            waveform = waveform.flatten()
        except Exception:
            # Best-effort: a corrupt or missing file becomes silence rather
            # than aborting the whole epoch.
            print(f'warning: failed loading: {path}')
            waveform = torch.zeros(self.load_size)

        # NOTE(review): the 2**-23 factor suggests 24-bit integer samples,
        # but torchaudio.load by default returns floats already normalized to
        # [-1, 1] — confirm the intended loader configuration.
        waveform = waveform * 2**-23
        assert waveform.max() <= 256
        assert waveform.min() >= -256
        assert waveform.dim() == 1
        return waveform

# Example configuration
class Config:
    """Static hyper-parameter bundle for SoundNet training (stand-in for a
    command-line options parser)."""

    # --- experiment bookkeeping ---
    name = 'soundnet'  # experiment name
    dataset = 'audio'  # dataset identifier
    finetune = ''  # if set, load this network instead of training from scratch
    saveIter = 5000  # interval (iterations) between checkpoints
    niter = 10000  # number of iterations over the dataset
    ntrain = float('inf')  # size of each epoch

    # --- data loading ---
    nThreads = 4  # number of data-prefetch workers
    batchSize = 64  # batch size
    loadSize = 22050 * 20  # samples to load per clip
    fineSize = 22050 * 20  # samples after cropping
    randomize = True  # shuffle the data files
    data_root = '/data/vision/torralba/crossmodal/flickr_videos/soundnet/mp3'
    label_binary_file = '/data/vision/torralba/crossmodal/soundnet/features/VGG16_IMNET_TRAIN_B%04d/prob'
    label2_binary_file = '/data/vision/torralba/crossmodal/soundnet/features/VGG16_PLACES2_TRAIN_B%04d/prob'
    label_text_file = '/data/vision/torralba/crossmodal/soundnet/lmdbs/train_frames4_%04d.txt'

    # --- labels / audio geometry ---
    label_dim = 1000
    label2_dim = 401
    label_time_steps = 4
    video_frame_time = 5  # 5 seconds
    sample_rate = 22050
    mean = 0

    # --- optimization ---
    lr = 0.001  # learning rate
    lambda_ = 250
    beta1 = 0.9  # Adam momentum term
    meanIter = 0  # iterations used to estimate the mean

    # --- hardware / visualization ---
    gpu = 1  # GPU to use
    cudnn = True  # whether to use cudnn
    display_port = 8001  # port for pushing graphs
    display_id = 1  # window id for pushed graphs

opt = Config()


def _build_train_loader(opt):
    """Construct the SoundNet training dataset and its DataLoader.

    NOTE(review): the label/text file paths in Config still contain '%04d'
    batch placeholders — they presumably need formatting with a batch index
    before the files can be opened; confirm against the original pipeline.
    """
    train_dataset = SoundNetDataset(
        data_root=opt.data_root,
        label_text_file=opt.label_text_file,
        label_binary_file=opt.label_binary_file,
        label2_binary_file=opt.label2_binary_file,
        label_dim=opt.label_dim,
        label2_dim=opt.label2_dim,
        label_time_steps=opt.label_time_steps,
        video_frame_time=opt.video_frame_time,
        sample_rate=opt.sample_rate,
        load_size=opt.loadSize,
        fine_size=opt.fineSize,
        mean=opt.mean,
    )
    return DataLoader(train_dataset, batch_size=opt.batchSize,
                      shuffle=opt.randomize, num_workers=opt.nThreads)


# Guard the heavy dataset construction so importing this module does not
# trigger filesystem I/O (the original ran unconditionally at import time).
if __name__ == '__main__':
    train_loader = _build_train_loader(opt)
    # Fetch a single batch to sanity-check tensor shapes.
    for data, label, label2 in train_loader:
        print(data.shape, label.shape, label2.shape)
        break
