import matplotlib.pyplot as plt
import os
import torch
import time
from thop import profile, clever_format
import math
from tqdm import tqdm


def plot_metrics( test_losses, test_accs, save_path='./output'):
    """Draw side-by-side test loss / accuracy curves and save them as a PNG.

    Args:
        test_losses: sequence of per-epoch test loss values.
        test_accs: sequence of per-epoch test accuracy values.
        save_path: output directory (created if missing); the figure is
            written to <save_path>/training_metrics.png.
    """
    xs = [i + 1 for i in range(len(test_losses))]

    # (panel index, series, line label, title, y-axis label)
    panels = [
        (1, test_losses, 'Test Loss', 'Loss over Epochs', 'Loss'),
        (2, test_accs, 'Test Acc', 'Accuracy over Epochs', 'Accuracy'),
    ]

    plt.figure(figsize=(10, 4))
    for idx, series, label, title, ylab in panels:
        plt.subplot(1, 2, idx)
        plt.plot(xs, series, label=label)
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(ylab)
        plt.legend()
        plt.grid(True)

    # Persist the combined figure and release it.
    os.makedirs(save_path, exist_ok=True)
    plt.tight_layout()
    plt.savefig(os.path.join(save_path, 'training_metrics.png'))
    plt.close()




def profile_model(model, device='cuda', model_name='Model'):
    """Report parameter count, FLOPs (MACs) and inference latency of a model.

    Args:
        model: the torch.nn.Module to profile; it is moved to `device`.
        device: device string for the model and dummy inputs.
        model_name: label used in the printed summary line.

    Returns:
        (params, macs, infer_time): thop's human-readable parameter and MAC
        strings, and the wall-clock seconds for one 128-sample forward pass.
    """
    model.to(device)
    # NOTE(review): input is hard-coded to 3x32x32 (CIFAR-sized) — confirm
    # this matches the model's expected input resolution.
    dummy_input = torch.randn(1, 3, 32, 32).to(device)

    # Parameter count and MACs via thop, formatted as human-readable strings.
    macs, params = profile(model, inputs=(dummy_input,), verbose=False)
    macs, params = clever_format([macs, params], "%.3f")

    # Measure inference time for a 128-sample batch.
    model.eval()
    use_cuda_sync = torch.cuda.is_available() and str(device).startswith('cuda')
    with torch.no_grad():
        dummy_batch = torch.randn(128, 3, 32, 32).to(device)
        # Warm-up pass so one-time costs (lazy CUDA init, cuDNN autotuning)
        # are excluded from the measurement.
        _ = model(dummy_batch)
        # CUDA kernels launch asynchronously: without a synchronize before
        # and after, time.time() measures only the launch overhead.
        if use_cuda_sync:
            torch.cuda.synchronize()
        start = time.time()
        _ = model(dummy_batch)
        if use_cuda_sync:
            torch.cuda.synchronize()
        infer_time = time.time() - start

    print(f" ✅ [{model_name}] Params: {params}, FLOPs: {macs}, Inference Time (128 samples): {infer_time:.4f}s")
    return params, macs, infer_time



def find_lr(model, trainloader, optimizer, criterion, device, init_value=1e-6, final_value=10., beta=0.98):
    """LR range test (Leslie Smith / fastai style): sweep the learning rate
    exponentially from `init_value` to `final_value` over one pass of the
    loader, tracking an exponentially smoothed loss, and recommend the lr
    one decade below the loss minimum.

    WARNING: the model weights and optimizer state ARE updated during the
    sweep — run this on a throwaway copy, or re-initialize afterwards.

    Args:
        model: model to probe.
        trainloader: DataLoader yielding (inputs, targets) batches.
        optimizer: optimizer whose param-group lr is swept in place.
        criterion: loss function mapping (outputs, targets) -> scalar loss.
        device: device inputs/targets are moved to.
        init_value: starting learning rate of the sweep.
        final_value: ending learning rate of the sweep.
        beta: smoothing factor for the moving average of the loss.

    Returns:
        The recommended learning rate (lr at minimum smoothed loss, / 10).
    """
    from datetime import datetime

    # Guard against tiny loaders: with <2 batches the original expression
    # `len(trainloader) - 1` would divide by zero (or go negative).
    num = max(len(trainloader) - 1, 1)
    mult = (final_value / init_value) ** (1 / num)
    lr = init_value
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    avg_loss = 0.
    best_loss = float('inf')
    batch_num = 0
    losses = []
    log_lrs = []

    model.train()
    for inputs, targets in tqdm(trainloader, desc="📈 Finding LR"):
        batch_num += 1

        inputs, targets = inputs.to(device), targets.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # Bias-corrected exponential moving average of the raw loss.
        avg_loss = beta * avg_loss + (1 - beta) * loss.item()
        smoothed_loss = avg_loss / (1 - beta ** batch_num)

        if smoothed_loss < best_loss or batch_num == 1:
            best_loss = smoothed_loss

        # Stop once the loss clearly diverges.
        if smoothed_loss > 4 * best_loss:
            break

        losses.append(smoothed_loss)
        log_lrs.append(math.log10(lr))

        loss.backward()
        optimizer.step()

        # Exponentially increase the lr for the next batch.
        lr *= mult
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    if not losses:
        raise ValueError("find_lr: trainloader produced no batches")

    # Plot smoothed loss vs log10(learning rate) on a fresh figure so we
    # don't draw onto whatever figure happens to be active.
    plt.figure()
    plt.plot(log_lrs, losses)
    plt.xlabel("Log10 Learning Rate")
    plt.ylabel("Loss")
    plt.title("Finding Best Learning Rate")
    plt.grid(True)

    # Save the plot to the current directory with a timestamped name.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"lr_find_plot_{timestamp}.png"
    plt.savefig(filename)
    # Bug fix: the original printed the literal text "(unknown)" instead of
    # interpolating the saved filename.
    print(f"📷 学习率查找图已保存为: {filename}")

    plt.show()

    # Recommend one decade below the lr where the smoothed loss was lowest.
    min_loss_idx = losses.index(min(losses))
    recommended_lr = 10**log_lrs[min_loss_idx] / 10
    print(f"✅ 推荐学习率：{recommended_lr:.2e}")

    return recommended_lr
