import torch
import torchaudio
from torch import optim
from torch.utils.data import DataLoader
import torch.nn as nn
from torch.utils.tensorboard.writer import SummaryWriter
import matplotlib.pyplot as plt
import os
from datetime import datetime

from CSBSmodel import CSBSModel
from dataset import DMERdataset

from baselines import LSTMRegressor


# --- Training hyperparameters ---
BATCH_SIZE = 16    
EPOCHS = 50
LEARNING_RATE = 0.0001
# Use GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SAMPLE_RATE = 44100  # audio sample rate (Hz) passed to the MFCC transform and dataset

# Macro: switches which label column is trained on.
TRAIN_TYPE = "V"  # either "A" or "V" (presumably arousal/valence — confirm with dataset)


def plot_metrics(metrics_history):
    """Plot every tracked metric series on one figure.

    Each entry of *metrics_history* (metric name -> sequence of values)
    becomes one labeled line. The chart is written to
    'metrics_comparison.png' and the figure is closed afterwards.
    """
    plt.figure(figsize=(12, 4))
    for name, values in metrics_history.items():
        plt.plot(values, label=name)
    plt.title('Training Metrics')
    plt.xlabel('Steps')
    plt.ylabel('Value')
    plt.legend()
    plt.savefig('metrics_comparison.png')
    plt.close()


## Load a pre-trained model (example, currently disabled)
# model = net()
# state_dict = torch.load('model_name.pth')
# model.load_state_dict(state_dict['model'])



def main():
    """Train a single-label emotion regressor on the DMER dataset.

    Builds an MFCC feature extractor, infers the model input size from one
    sample batch, trains an LSTM regressor with SmoothL1 loss for EPOCHS
    epochs, logs step/epoch losses to TensorBoard, and saves a checkpoint
    after every epoch.
    """
    exp_time = datetime.now().strftime("%Y%m%d")
    log_dir = f"DMER2_0/logs_train/{TRAIN_TYPE}_{exp_time}"
    model_dir = f"DMER2_0/model_logs/{TRAIN_TYPE}_{exp_time}"
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(model_dir, exist_ok=True)
    writer = SummaryWriter(log_dir)

    transform_mfcc = torchaudio.transforms.MFCC(
        sample_rate=SAMPLE_RATE,
        n_mfcc=30,
        log_mels=True,
        melkwargs={"n_mels": 128, "n_fft": 2048, "hop_length": 512}
    )

    # Load only one annotation type, selected by TRAIN_TYPE.
    train_dataset = DMERdataset(
        "D:/Learning materials/Yan2/声光智能控制/GA-BP2/DMER2_0/datasets/train/dmer_annotations(std).csv",
        "D:/Learning materials/Yan2/声光智能控制/GA-BP2/DMER2_0/datasets/train/chorus",
        transform=transform_mfcc,
        target_sample_rate=SAMPLE_RATE,
        label_type=TRAIN_TYPE
    )
    # FIX: shuffle training batches each epoch; the original iterated the
    # dataset in fixed on-disk order, which can bias gradient-based training.
    train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)

    # Infer the model's flattened input dimensionality from one sample batch.
    sample_inputs, _ = next(iter(train_dataloader))
    sample_inputs = sample_inputs.squeeze(1)  # [batch, seq_len, n_mfcc, time_steps]
    batch, seq_len, n_mfcc, time_steps = sample_inputs.shape
    input_dim = n_mfcc * time_steps
    model = LSTMRegressor(input_dim=input_dim, hidden_dim=64, num_layers=1, output_dim=1).to(DEVICE)
    # model = CSBSModel(1, 32, 3, 128, 1).to(DEVICE)

    # Record the model graph in TensorBoard for visualization.
    dummy_input = torch.randn(batch, seq_len, n_mfcc * time_steps).to(DEVICE)
    writer.add_graph(model, dummy_input)

    loss_fn = nn.SmoothL1Loss(beta=1.0)
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    total_train_step = 0

    print_device_info(model, loss_fn, train_dataloader, DEVICE)

    model.train()
    for epoch in range(EPOCHS):
        total_loss = 0.0

        for batch_idx, (inputs, labels) in enumerate(train_dataloader):
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
            inputs = inputs.squeeze(1)  # [batch, seq_len, n_mfcc, time_steps]
            batch, seq_len, n_mfcc, time_steps = inputs.shape
            inputs = inputs.reshape(batch, seq_len, n_mfcc * time_steps)  # [batch, seq_len, feature_dim]
            # NOTE(review): squeezing dim 0 of labels/outputs assumes a leading
            # singleton dim — confirm against DMERdataset and LSTMRegressor shapes.
            labels = labels.squeeze(0)
            outputs = model(inputs)
            outputs = outputs.squeeze(0)
            loss = loss_fn(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_train_step += 1
            total_loss += loss.item()

            if total_train_step % 100 == 0:
                writer.add_scalar("train_loss", loss.item(), total_train_step)
                # FIX: log the optimizer's actual learning rate rather than the
                # constant, so the curve stays correct if a scheduler is added.
                writer.add_scalar("learning_rate", optimizer.param_groups[0]["lr"], total_train_step)

        # Epoch-level logging.
        avg_loss = total_loss / len(train_dataloader)
        print(f"Epoch {epoch+1}/{EPOCHS} - Avg Loss: {avg_loss}")
        writer.add_scalar("epoch_loss", avg_loss, epoch)

        # Save a checkpoint after every epoch.
        # FIX: name the file after the actual model class — the original
        # hard-coded "CSBSmodel" while training LSTMRegressor.
        torch.save(model.state_dict(), f"{model_dir}/{type(model).__name__}_{TRAIN_TYPE}_epoch{epoch}.pth")
        print("Model Saved.")

    writer.close()

def print_device_info(model, loss_fn, dataloader, device):
    """Print which device the model, loss computation, and data batches live on.

    Runs the loss once on small dummy tensors to confirm where it actually
    computes, and peeks at the loader's first batch to report its device.
    """
    print(f"\n设备位置信息:")
    print(f"当前使用设备: {device}")
    model_device = next(model.parameters()).device
    print(f"模型: {model_device}")

    # Probe the loss function with dummy tensors to verify its compute device.
    probe_a = torch.randn(1, 2).to(device)
    probe_b = torch.randn(1, 2).to(device)
    probe_loss = loss_fn(probe_a, probe_b)
    print(f"损失函数计算结果在: {probe_loss.device}")

    # Inspect the first batch produced by the dataloader.
    first_inputs, first_labels = next(iter(dataloader))
    print(f"输入数据: {first_inputs.device}")
    print(f"标签数据: {first_labels.device}")

# Script entry point: run training only when executed directly.
if __name__ == "__main__":
    main()