import torch
import torchaudio
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard.writer import SummaryWriter
from datetime import datetime

from CSBSmodel import CSBSModel
from dataset import DMERdataset

from baselines import LSTMRegressor

# Evaluation batch size: one clip at a time so each song is scored individually.
BATCH_SIZE = 1
# Prefer GPU when available; otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Audio sample rate (Hz) the MFCC transform and dataset loader are configured for.
SAMPLE_RATE = 44100

# MFCC front-end: 30 coefficients derived from a 128-band log-mel spectrogram
# (2048-sample FFT window, 512-sample hop) at 44.1 kHz.
transform_mfcc = torchaudio.transforms.MFCC(
    sample_rate=SAMPLE_RATE,
    n_mfcc=30,
    log_mels=True,
    melkwargs={"n_mels": 128, "n_fft": 2048, "hop_length": 512},
)

# Test split of the DMER dataset. Raw strings keep the Windows backslashes
# literal without doubling them.
test_dataset = DMERdataset(
    r"DMER2_0\datasets\test\dmer_annotations(std).csv",
    r"DMER2_0\datasets\test\chorus",
    transform=transform_mfcc,
    target_sample_rate=SAMPLE_RATE,
    label_type="V",  # "V" (valence) or "A" (arousal), depending on what you evaluate
)
# Use the module-level constant instead of a hard-coded 1 so the batch size
# is configured in exactly one place.
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

# model = CSBSModel(1, 32, 3, 128, 1).to(DEVICE)

# Probe one batch to derive the flattened per-step feature size for the LSTM.
# Assumed loader output shape: [batch, 1, seq_len, n_mfcc, time_steps] — the
# squeeze(1) below drops the singleton channel dim; TODO confirm against DMERdataset.
sample_inputs, _ = next(iter(test_dataloader))
sample_inputs = sample_inputs.squeeze(1)  # [batch, seq_len, n_mfcc, time_steps]
batch, seq_len, n_mfcc, time_steps = sample_inputs.shape
input_dim = n_mfcc * time_steps
model = LSTMRegressor(input_dim=input_dim, hidden_dim=64, num_layers=1, output_dim=1).to(DEVICE)

# Raw string: the original literal contained invalid escape sequences
# ("\m", "\V", "\C") that only work by accident and warn on Python 3.12+.
# map_location lets a checkpoint saved on GPU load on a CPU-only machine.
state_dict = torch.load(
    r"DMER2_0\model_logs\V_20250627\CSBSmodel_V_epoch12.pth",
    map_location=DEVICE,
    weights_only=True,
)
model.load_state_dict(state_dict)
model.eval()  # disable dropout / batch-norm updates for evaluation

# ---------------------------------------------------------------------------
# Evaluation: run the whole test split once, collect predictions and targets,
# then report RMSE / MAE / R^2 plus the average per-clip MSE loss.
# ---------------------------------------------------------------------------
# Imports were previously buried inside the no_grad block; hoisted here.
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import numpy as np

loss_fn = nn.MSELoss()
total_loss = 0.0
test_step = 0

outputs_list = []
labels_list = []

with torch.no_grad():
    for inputs, labels in test_dataloader:
        # CSBS variant (kept for reference):
        # inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
        # outputs = model(inputs)

        # LSTM: flatten the (n_mfcc, time_steps) plane into one feature
        # vector per sequence step so the recurrent layer sees
        # [batch, seq_len, feature_dim].
        inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
        inputs = inputs.squeeze(1)  # [batch, seq_len, n_mfcc, time_steps]
        batch, seq_len, n_mfcc, time_steps = inputs.shape
        inputs = inputs.reshape(batch, seq_len, n_mfcc * time_steps)
        # The original squeezed labels twice (before and after .to(DEVICE));
        # a single squeeze of the batch dim (batch_size == 1) is sufficient.
        labels = labels.squeeze(0)
        outputs = model(inputs)
        outputs = outputs.squeeze(0)

        # Collect predictions and ground truth for the aggregate metrics.
        outputs_list.append(outputs.detach().cpu().numpy())
        labels_list.append(labels.detach().cpu().numpy())

        loss = loss_fn(outputs, labels)
        total_loss += loss.item()
        test_step += 1

# Merge the per-clip arrays into one flat vector each.
outputs_all = np.concatenate(outputs_list, axis=0)
labels_all = np.concatenate(labels_list, axis=0)

mse = mean_squared_error(labels_all, outputs_all)
rmse = mse ** 0.5
mae = mean_absolute_error(labels_all, outputs_all)
r2 = r2_score(labels_all, outputs_all)

# total_loss / test_step were accumulated but never reported before;
# guard against an empty loader to avoid ZeroDivisionError.
if test_step:
    print(f"Avg MSE loss: {total_loss / test_step:.4f}")
print(f"RMSE: {rmse:.4f}")
print(f"MAE: {mae:.4f}")
print(f"R2: {r2:.4f}")



# Persist the final test metrics to TensorBoard under a date-stamped run dir.
stamp = datetime.now().strftime("%Y%m%d")
writer = SummaryWriter(f"logs_test_{stamp}")

# All three scalars are logged at step 0 — this is a one-shot evaluation,
# not a curve over time.
for tag, value in (("test/RMSE", rmse), ("test/MAE", mae), ("test/R2", r2)):
    writer.add_scalar(tag, value, 0)

writer.close()
