import torch
import torch.nn as nn
import torch.optim as optim
from torch.cuda.amp import autocast, GradScaler
import numpy as np
import os

from torch.utils.data import DataLoader

from utils import MUSDBDataset, save_checkpoint, load_config, plot_training_curves
from tqdm import tqdm


class Demucs(nn.Module):
    """Simplified Demucs source-separation network.

    A 1-D convolutional encoder/decoder with U-Net-style skip connections
    and a bidirectional LSTM bottleneck.  The final decoder layer emits
    ``2 * num_sources`` channels which are reshaped into
    ``(batch, num_sources, 2, time)`` — i.e. a stereo waveform per source.

    Expected ``config`` keys: ``num_sources``, ``depth``, ``channels``,
    ``growth``, ``kernel_size``, ``stride``, ``context`` (used as conv
    padding), ``lstm_layers``.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.num_sources = config['num_sources']
        self.encoder, self.decoder = nn.ModuleList(), nn.ModuleList()
        # NOTE: removed a dead `self.lstm = None` that was immediately
        # overwritten by the real LSTM assignment below.

        # Encoder: channel count grows geometrically (channels * growth**i).
        in_channels = 2  # stereo input
        for i in range(config['depth']):
            out_channels = int(config['channels'] * (config['growth'] ** i))
            self.encoder.append(nn.Sequential(
                nn.Conv1d(in_channels, out_channels, config['kernel_size'],
                          config['stride'], padding=config['context']),
                nn.ReLU(),
                nn.GroupNorm(1, out_channels)
            ))
            in_channels = out_channels

        # Bidirectional LSTM bottleneck; the linear projection maps the
        # concatenated forward/backward hidden states back to in_channels.
        self.lstm = nn.LSTM(input_size=in_channels, hidden_size=in_channels,
                            num_layers=config['lstm_layers'], bidirectional=True)
        self.lstm_proj = nn.Linear(2 * in_channels, in_channels)

        # Decoder: mirrors the encoder.  Each layer consumes the previous
        # decoder output concatenated with a skip connection, hence the
        # ``2 * in_channels`` input width.  The last layer (i == 0) emits
        # raw waveforms — 2 channels per source, no ReLU/GroupNorm.
        for i in reversed(range(config['depth'])):
            out_channels = (int(config['channels'] * (config['growth'] ** (i - 1)))
                            if i > 0 else 2 * self.num_sources)
            self.decoder.append(nn.Sequential(
                nn.ConvTranspose1d(2 * in_channels, out_channels, config['kernel_size'],
                                   config['stride'], padding=config['context']),
                nn.ReLU() if i > 0 else nn.Identity(),
                nn.GroupNorm(1, out_channels) if i > 0 else nn.Identity()
            ))
            in_channels = out_channels

    def forward(self, x):
        """Separate ``x`` of shape (batch, 2, time) into per-source audio
        of shape (batch, num_sources, 2, time').

        ``time'`` may differ slightly from ``time`` because the strided
        conv / transposed-conv stack is not exactly length-preserving;
        callers are expected to crop to a common length.
        """
        # Encode, keeping each level's activation for the skip connections.
        skips = []
        for layer in self.encoder:
            x = layer(x)
            skips.append(x)

        # LSTM operates on (time, batch, channels); permute in and out.
        x = x.permute(2, 0, 1)
        x, _ = self.lstm(x)
        x = self.lstm_proj(x)
        x = x.permute(1, 2, 0)

        # Decode, consuming the skip connections deepest-first.
        for i, layer in enumerate(self.decoder):
            skip = skips[-(i + 1)]
            # Crop whichever tensor is longer so the time axes match.
            if x.size(2) > skip.size(2):
                x = x[:, :, :skip.size(2)]
            elif skip.size(2) > x.size(2):
                skip = skip[:, :, :x.size(2)]
            x = torch.cat([x, skip], dim=1)
            x = layer(x)

        # Split the 2*num_sources channels into (source, stereo-channel).
        B, _, T = x.shape
        return x.view(B, self.num_sources, 2, T)


def _crop_to_match(preds, targets):
    """Crop ``preds``/``targets`` along the time axis (dim 3) to the
    shorter of the two so an elementwise loss can be computed.

    Needed because the model's strided conv / transposed-conv stack is
    not exactly length-preserving.
    """
    if preds.size(3) > targets.size(3):
        preds = preds[:, :, :, :targets.size(3)]
    elif targets.size(3) > preds.size(3):
        targets = targets[:, :, :, :preds.size(3)]
    return preds, targets


def train():
    """Train the Demucs model on MUSDB with mixed precision.

    Reads hyperparameters from ``config.yaml``.  Saves a checkpoint to
    ``save_dir`` every 5 epochs, writes a per-epoch loss log to
    ``log_dir``, and plots the training curves when training completes.
    """
    # Configuration and device.
    config = load_config('config.yaml')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Datasets and loaders.
    # NOTE(review): the MUSDB 'test' split is used as the validation set
    # here — confirm this is intended rather than a held-out 'valid' split.
    train_dataset = MUSDBDataset(config['train']['data_root'], 'train',
                                 config['train']['segment_length'])
    valid_dataset = MUSDBDataset(config['train']['data_root'], 'test',
                                 config['train']['segment_length'])

    train_loader = DataLoader(train_dataset, batch_size=config['train']['batch_size'],
                              shuffle=True, num_workers=config['train']['num_workers'])
    valid_loader = DataLoader(valid_dataset, batch_size=config['train']['batch_size'],
                              num_workers=config['train']['num_workers'])

    # Model, optimizer, loss.  Mixed precision is only enabled on CUDA:
    # torch.cuda.amp is a no-op (or warns) on CPU.
    use_amp = device.type == 'cuda'
    model = Demucs(config['model']).to(device)
    optimizer = optim.Adam(model.parameters(), lr=config['train']['lr'])
    criterion = nn.L1Loss()
    scaler = GradScaler(enabled=use_amp)

    # Training loop state and output directories.
    train_losses, valid_losses = [], []
    os.makedirs(config['train']['save_dir'], exist_ok=True)
    # Fix: log_dir was never created, so np.savez below failed on a fresh run.
    os.makedirs(config['train']['log_dir'], exist_ok=True)

    for epoch in range(config['train']['epochs']):
        # ---- Training ----
        model.train()
        epoch_train_loss = 0
        progress = tqdm(train_loader, desc=f"Epoch {epoch + 1} [Train]")
        for mix, targets in progress:
            mix, targets = mix.to(device), targets.to(device)

            optimizer.zero_grad()

            with autocast(enabled=use_amp):
                preds = model(mix)
                preds, targets = _crop_to_match(preds, targets)
                loss = criterion(preds, targets)

            # Scaled backward + step; with AMP disabled these degenerate
            # to plain backward()/step().
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            epoch_train_loss += loss.item()
            progress.set_postfix(loss=loss.item())

        # ---- Validation ----
        model.eval()
        epoch_valid_loss = 0
        with torch.no_grad():
            for mix, targets in valid_loader:
                mix, targets = mix.to(device), targets.to(device)
                preds = model(mix)
                preds, targets = _crop_to_match(preds, targets)
                loss = criterion(preds, targets)
                epoch_valid_loss += loss.item()

        # Record per-epoch average losses.
        avg_train_loss = epoch_train_loss / len(train_loader)
        avg_valid_loss = epoch_valid_loss / len(valid_loader)
        train_losses.append(avg_train_loss)
        valid_losses.append(avg_valid_loss)

        print(f"Epoch {epoch + 1} | Train Loss: {avg_train_loss:.4f} | Valid Loss: {avg_valid_loss:.4f}")

        # Checkpoint every 5 epochs; log losses every epoch.
        if (epoch + 1) % 5 == 0:
            save_checkpoint(model, optimizer, epoch + 1,
                            os.path.join(config['train']['save_dir'], f'model_{epoch + 1}.pth'))

        np.savez(os.path.join(config['train']['log_dir'], 'training_log.npz'),
                 train_loss=np.array(train_losses),
                 valid_loss=np.array(valid_losses))

    # Visualize the training curves once all epochs are done.
    plot_training_curves(os.path.join(config['train']['log_dir'], 'training_log.npz'),
                         os.path.join(config['train']['log_dir'], 'training_curves.png'))


# Script entry point: run training only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    train()