import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import numpy as np
import os
from tqdm import tqdm
import time
import matplotlib.pyplot as plt

# Hyperparameter definitions
class Config:
    """Static hyperparameter container; read through the module-level ``opt``."""
    dataset = 'audio'   # dataset name
    nThreads = 40       # number of data-prefetch worker threads
    batchSize = 64      # mini-batch size
    loadSize = 22050 * 20  # samples loaded per clip (20 s at 22.05 kHz)
    fineSize = 22050 * 20  # samples after cropping (same as loadSize here)
    lr = 0.001          # learning rate
    lambda_ = 250       # NOTE(review): unused in this file — verify whether a loss weight was dropped
    beta1 = 0.9         # Adam first-moment decay term
    meanIter = 0        # iterations used to estimate the mean
    saveIter = 5000     # checkpoint-saving interval (iterations)
    niter = 10000       # epochs, i.e. passes over the dataset (see train())
    ntrain = float('inf') # examples per epoch (inf = use everything)
    gpu = 1             # GPU flag; values > 0 move tensors to CUDA (see train())
    cudnn = True        # whether to use cudnn
    finetune = ''       # if set, load this network instead of training from scratch
    name = 'soundnet'   # experiment name (used in checkpoint filenames)
    randomize = True    # whether to shuffle the data files
    display_port = 8001 # port for pushing charts
    display_id = 1      # window id for pushing charts
    data_root = '/data/vision/torralba/crossmodal/flickr_videos/soundnet/mp3'
    # The three paths below are printf-style templates; presumably formatted
    # with a shard index elsewhere — TODO confirm against the data pipeline.
    label_binary_file = '/data/vision/torralba/crossmodal/soundnet/features/VGG16_IMNET_TRAIN_B%04d/prob'
    label2_binary_file = '/data/vision/torralba/crossmodal/soundnet/features/VGG16_PLACES2_TRAIN_B%04d/prob'
    label_text_file = '/data/vision/torralba/crossmodal/soundnet/lmdbs/train_frames4_%04d.txt'
    label_dim = 1000    # number of ImageNet-teacher classes
    label2_dim = 401    # number of Places2-teacher classes
    label_time_steps = 4
    video_frame_time = 5 # 5 seconds
    sample_rate = 22050
    mean = 0

# Single module-level configuration instance shared by the rest of the script.
opt = Config()

# Dataset class
class SoundNetDataset(Dataset):
    """Dataset of (waveform, int label) pairs listed in a text index file.

    Each line of ``label_file`` is a whitespace-separated record of an audio
    path followed by an integer class label.
    """

    def __init__(self, data_root, label_file, transform=None):
        self.data_root = data_root
        self.label_file = label_file
        self.transform = transform
        self.data = self.load_data()

    def load_data(self):
        """Parse the index file into a list of [path, label] token lists."""
        records = []
        with open(self.label_file, 'r') as handle:
            for raw_line in handle:
                records.append(raw_line.strip().split())
        return records

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        path, raw_label = self.data[idx]
        waveform = self.load_audio(path)
        waveform = self.transform(waveform) if self.transform else waveform
        return waveform, int(raw_label)

    def load_audio(self, path):
        # Placeholder loader: a real implementation would decode `path`;
        # this one emits Gaussian noise of the configured clip length.
        samples = np.random.randn(opt.fineSize)
        return torch.from_numpy(samples).float()

# Build the dataset and data loader
# Training data pipeline: the index file is parsed eagerly at construction.
# NOTE(review): opt.label_text_file still contains an unformatted '%04d'
# placeholder — presumably it should be formatted with a shard index before
# being opened; verify against the original data pipeline.
train_dataset = SoundNetDataset(opt.data_root, opt.label_text_file)
train_loader = DataLoader(train_dataset, batch_size=opt.batchSize, shuffle=opt.randomize, num_workers=opt.nThreads)

# Model definition
class SoundNet(nn.Module):
    """SoundNet-style 1-D CNN over raw waveforms.

    Input:  (batch, 1, samples) float tensor.
    Output: (batch, num_classes, time) per-timestep log-probabilities
            (LogSoftmax over the class dimension).
    """

    def __init__(self, num_classes=1000):
        super(SoundNet, self).__init__()

        def conv_bn_relu(c_in, c_out, k, s, p):
            # Conv -> BatchNorm -> ReLU triple used throughout the trunk.
            return [
                nn.Conv1d(c_in, c_out, kernel_size=k, stride=s, padding=p),
                nn.BatchNorm1d(c_out),
                nn.ReLU(inplace=True),
            ]

        # Layer order (and therefore Sequential indices / state_dict keys)
        # matches the original flat definition exactly.
        trunk = []
        trunk += conv_bn_relu(1, 16, 64, 2, 32)
        trunk.append(nn.MaxPool1d(kernel_size=8, stride=8))
        trunk += conv_bn_relu(16, 32, 32, 2, 16)
        trunk.append(nn.MaxPool1d(kernel_size=8, stride=8))
        trunk += conv_bn_relu(32, 64, 16, 2, 8)
        trunk += conv_bn_relu(64, 128, 8, 2, 4)
        trunk += conv_bn_relu(128, 256, 4, 2, 2)
        trunk.append(nn.MaxPool1d(kernel_size=4, stride=4))
        trunk += conv_bn_relu(256, 512, 4, 2, 2)
        trunk += conv_bn_relu(512, 1024, 4, 2, 2)
        self.features = nn.Sequential(*trunk)

        # 1x1-style temporal classifier head emitting log-probabilities.
        self.classifier = nn.Sequential(
            nn.Conv1d(1024, num_classes, kernel_size=8, stride=2),
            nn.LogSoftmax(dim=1),
        )

    def forward(self, x):
        return self.classifier(self.features(x))

# Build the network and move it to the GPU when one is configured.
net = SoundNet(num_classes=opt.label_dim)
if opt.gpu > 0:
    net = net.cuda()

# The network ends in LogSoftmax, i.e. it emits log-probabilities of shape
# (batch, classes, time), while the dataset yields integer class indices.
# NLLLoss consumes exactly that pairing; the previous KLDivLoss required a
# full target *distribution* of the same shape as the output and therefore
# could never be fed integer labels.
criterion = nn.NLLLoss()
optimizer = optim.Adam(net.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

# Training loop
def train():
    """Train for opt.niter epochs over train_loader, checkpointing periodically."""
    net.train()
    for epoch in range(opt.niter):
        epoch_loss = 0.0
        for i, (inputs, targets) in enumerate(tqdm(train_loader)):
            if opt.gpu > 0:
                inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            # (batch, samples) -> (batch, 1, samples) for Conv1d.
            outputs = net(inputs.unsqueeze(1))  # (batch, classes, time)
            # Repeat each clip-level label across the output time axis so the
            # per-timestep log-probabilities can be scored by NLLLoss.
            time_targets = targets.unsqueeze(1).repeat(1, outputs.size(2))
            loss = criterion(outputs, time_targets)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            # NOTE: also fires at i == 0, producing one checkpoint at the
            # start of every epoch in addition to the periodic ones.
            if i % opt.saveIter == 0:
                save_checkpoint(net, epoch, i)
        print(f'Epoch [{epoch+1}/{opt.niter}], Loss: {epoch_loss/len(train_loader):.4f}')

def save_checkpoint(model, epoch, iteration):
    """Persist model weights to checkpoints/<name>_epoch<E>_iter<I>.pth."""
    checkpoint_dir = 'checkpoints'
    os.makedirs(checkpoint_dir, exist_ok=True)
    checkpoint_path = os.path.join(checkpoint_dir, f'{opt.name}_epoch{epoch}_iter{iteration}.pth')
    torch.save(model.state_dict(), checkpoint_path)
    print(f'Saved checkpoint: {checkpoint_path}')

if __name__ == "__main__":
    train()
