import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
# from ghost_only_resnet import GhostOnlyResNet
# from vanilla_resnet import VanillaResNet
from tqdm import tqdm
import matplotlib.pyplot as plt
from thop import profile, clever_format
import time
import os
import argparse
import math

def get_data_loaders(batch_size=256):
    """Build CIFAR-100 train/test DataLoaders with standard augmentation.

    Train pipeline: random crop + flip + RandAugment + normalize + random
    erasing. Test pipeline: normalize only. Both use CIFAR-100 channel stats.
    """
    # CIFAR-100 per-channel mean/std used by both pipelines.
    cifar_mean = (0.5071, 0.4865, 0.4409)
    cifar_std = (0.2673, 0.2564, 0.2761)

    train_tf = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandAugment(num_ops=2, magnitude=9),
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
        transforms.RandomErasing(p=0.25, scale=(0.02, 0.2), ratio=(0.3, 3.3), value=0),
    ])

    test_tf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
    ])

    train_set = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=train_tf)
    test_set = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=test_tf)

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=8)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=8)
    return train_loader, test_loader

def evaluate(model, dataloader, device):
    """Run one full validation pass.

    Returns (mean per-batch cross-entropy loss, top-1 accuracy).
    """
    criterion = nn.CrossEntropyLoss()
    model.eval()
    num_correct = 0
    num_seen = 0
    total_loss = 0.0
    with torch.no_grad():
        for batch_x, batch_y in dataloader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)
            logits = model(batch_x)
            total_loss += criterion(logits, batch_y).item()
            preds = logits.argmax(dim=1)
            num_seen += batch_y.size(0)
            num_correct += (preds == batch_y).sum().item()
    return total_loss / len(dataloader), num_correct / num_seen

def plot_curves(val_losses, val_accs, model_name):
    """Save side-by-side validation loss/accuracy curves under results/."""
    xs = list(range(1, len(val_losses) + 1))
    fig = plt.figure(figsize=(10, 4))

    # Left panel: validation loss.
    ax_loss = fig.add_subplot(1, 2, 1)
    ax_loss.plot(xs, val_losses, 'r', label='Val Loss')
    ax_loss.set_title('Val Loss Curve')
    ax_loss.set_xlabel('Epoch')
    ax_loss.set_ylabel('Loss')
    ax_loss.legend()

    # Right panel: validation accuracy.
    ax_acc = fig.add_subplot(1, 2, 2)
    ax_acc.plot(xs, val_accs, 'r', label='Val Acc')
    ax_acc.set_title('Val Accuracy Curve')
    ax_acc.set_xlabel('Epoch')
    ax_acc.set_ylabel('Accuracy')
    ax_acc.legend()

    os.makedirs("results", exist_ok=True)
    fig.savefig(f"results/{model_name}_val_curves.png")
    plt.close(fig)

def profile_model(model, device, model_name):
    """Report parameter count, FLOPs (via thop) and batch-128 inference latency.

    Returns (params, macs, infer_time) where params/macs are thop-formatted
    strings and infer_time is wall-clock seconds for one forward pass.
    """
    dummy_input = torch.randn(1, 3, 32, 32).to(device)
    macs, params = profile(model, inputs=(dummy_input, ), verbose=False)
    macs, params = clever_format([macs, params], "%.3f")

    model.eval()
    with torch.no_grad():
        dummy_batch = torch.randn(128, 3, 32, 32).to(device)
        # Warm-up pass so lazy init / cudnn autotuning is excluded from timing.
        _ = model(dummy_batch)
        # CUDA kernel launches are asynchronous; without synchronize() the
        # timer would only measure launch overhead, not the actual forward.
        if device.type == "cuda":
            torch.cuda.synchronize()
        start = time.time()
        _ = model(dummy_batch)
        if device.type == "cuda":
            torch.cuda.synchronize()
        infer_time = time.time() - start

    print(f" [{model_name}] Params: {params}, FLOPs: {macs}, Inference time : {infer_time:.4f}s")
    return params, macs, infer_time

def get_model(model_name, num_classes):
    """Instantiate the architecture named by *model_name*.

    The import is done lazily so model files for unused architectures are
    not required at startup.

    Raises:
        ValueError: if *model_name* is not a recognized architecture.
    """
    if model_name == "GhostOnlyResNet":
        from ghost_only_resnet import resnet50
        return resnet50(num_classes=num_classes)
    raise ValueError(f"Unknown model: {model_name}")

def find_lr(model, trainloader, optimizer, criterion, device, init_value=1e-6, final_value=10., beta=0.98):
    """LR range test: sweep LR geometrically over one epoch and record loss.

    Multiplies the learning rate by a fixed factor each batch so it moves
    from *init_value* to *final_value*, tracking an exponentially smoothed
    loss (factor *beta*). Stops early when the smoothed loss explodes past
    4x the best seen. Saves the curve to results/lr_finder_curve.png.

    Returns:
        (log_lrs, losses): log10 learning rates and smoothed losses collected.
    """
    # Guard against ZeroDivisionError on loaders with <= 1 batch.
    num = max(len(trainloader) - 1, 1)
    mult = (final_value / init_value) ** (1 / num)
    lr = init_value
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    avg_loss = 0.
    best_loss = float('inf')
    batch_num = 0
    losses = []
    log_lrs = []

    model.train()
    for inputs, targets in tqdm(trainloader, desc=" Finding LR"):
        batch_num += 1

        inputs, targets = inputs.to(device), targets.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # Exponentially smoothed loss with bias correction (as in Adam).
        avg_loss = beta * avg_loss + (1 - beta) * loss.item()
        smoothed_loss = avg_loss / (1 - beta ** batch_num)

        # Track the best (lowest) smoothed loss seen so far.
        if smoothed_loss < best_loss or batch_num == 1:
            best_loss = smoothed_loss

        # Early stop: loss has diverged, higher LRs are useless.
        if smoothed_loss > 4 * best_loss:
            break

        losses.append(smoothed_loss)
        log_lrs.append(math.log10(lr))

        loss.backward()
        optimizer.step()

        lr *= mult
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    # Plot on a fresh figure so we don't pollute any figure already open.
    plt.figure()
    plt.plot(log_lrs, losses)
    plt.xlabel("Log Learning Rate")
    plt.ylabel("Smoothed Loss")
    plt.title("Learning Rate Finder")
    os.makedirs("results", exist_ok=True)
    plt.savefig("results/lr_finder_curve.png")
    plt.close()
    print(" Saved LR curve to results/lr_finder_curve.png")
    return log_lrs, losses


def train(model_name):
    """Train *model_name* on CIFAR-100 with linear warmup + cosine LR decay.

    Saves the best checkpoint (by validation accuracy) to
    results/best_<ModelClass>.pth and the validation curves to results/.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device:", device)
    num_epochs = 100
    batch_size = 128
    learning_rate = 0.005
    warmup_epochs = 5  # linear warmup duration

    trainloader, testloader = get_data_loaders(batch_size)
    model = get_model(model_name, num_classes=100).to(device)
    model_name = model.__class__.__name__

    criterion = nn.CrossEntropyLoss()
    # NOTE: the lr set here is overwritten every epoch by the schedule below.
    optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=5e-4)

    # BUG FIX: the checkpoint save below needs results/ to exist; previously
    # only plot_curves() created it, so the first torch.save could raise
    # FileNotFoundError.
    os.makedirs("results", exist_ok=True)

    best_acc = 0
    val_losses = []
    val_accs = []

    print("Profiling model...")
    params, flops, infer_time = profile_model(model, device, model_name)

    for epoch in range(num_epochs):
        model.train()

        # BUG FIX: the old code built CosineAnnealingLR on an optimizer
        # created with lr=0, so the scheduler's base_lrs was [0] and every
        # post-warmup step() collapsed the LR to ~0, halting learning.
        # Compute the warmup + cosine schedule explicitly instead.
        if epoch < warmup_epochs:
            # Linear warmup: learning_rate/warmup_epochs .. learning_rate.
            lr = learning_rate * (epoch + 1) / warmup_epochs
        else:
            # Cosine annealing from learning_rate down to 0 over the
            # remaining num_epochs - warmup_epochs epochs.
            progress = (epoch - warmup_epochs) / (num_epochs - warmup_epochs)
            lr = 0.5 * learning_rate * (1 + math.cos(math.pi * progress))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        for inputs, targets in trainloader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

        val_loss, val_acc = evaluate(model, testloader, device)
        val_losses.append(val_loss)
        val_accs.append(val_acc)

        current_lr = optimizer.param_groups[0]['lr']
        print(f"[{model_name}] Epoch {epoch+1}: LR={current_lr:.6f}, Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}")

        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), f"results/best_{model_name}.pth")
            print(f" Best model saved with acc: {best_acc:.4f}")

    plot_curves(val_losses, val_accs, model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='VanillaResNet',
                        choices=['VanillaResNet', 'GhostHornetResNet', 'GhostOnlyResNet', 'HornetOnlyResNet'],
                        help='选择模型结构')
    parser.add_argument('--find_lr', action='store_true', help='是否执行学习率搜索')
    args = parser.parse_args()

    if args.find_lr:
        # LR range test mode: sweep learning rates on the training set only.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        trainloader, _ = get_data_loaders(batch_size=256)
        model = get_model(args.model, num_classes=100).to(device)
        optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4)
        criterion = nn.CrossEntropyLoss()
        # (removed a redundant local `import math`; math is imported at file top)
        find_lr(model, trainloader, optimizer, criterion, device)
    else:
        train(args.model)

