import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR, OneCycleLR, MultiStepLR
from tqdm import tqdm
import matplotlib.pyplot as plt
from thop import profile, clever_format
import time
import os
import argparse
import math
from hornet import hornet_tiny_7x7, hornet_tiny_gf


def get_data_loaders(batch_size=128):
    """Build CIFAR-100 train/test DataLoaders.

    The train split gets crop/flip/jitter/erasing augmentation; the test
    split only normalization. Both use the CIFAR-100 channel statistics.
    """
    # CIFAR-100 per-channel mean / std used for normalization.
    mean = (0.5071, 0.4865, 0.4409)
    std = (0.2673, 0.2564, 0.2761)

    train_tf = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.05),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
        transforms.RandomErasing(p=0.1, scale=(0.02, 0.33)),  # mild random erasing
    ])
    test_tf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    train_set = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=train_tf)
    test_set = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=test_tf)

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True)
    return train_loader, test_loader

def evaluate(model, dataloader, device):
    """Run one evaluation pass.

    Returns (mean per-batch cross-entropy loss, top-1 accuracy) over the
    whole dataloader. Runs under no_grad with the model in eval mode.
    """
    criterion = nn.CrossEntropyLoss()
    model.eval()
    total_loss = 0.0
    n_correct = 0
    n_samples = 0
    with torch.no_grad():
        for images, labels in dataloader:
            images = images.to(device, non_blocking=True)
            labels = labels.to(device, non_blocking=True)
            logits = model(images)
            total_loss += criterion(logits, labels).item()
            preds = logits.argmax(dim=1)
            n_samples += labels.size(0)
            n_correct += (preds == labels).sum().item()
    # Note: loss is averaged per *batch*, accuracy per *sample*.
    return total_loss / len(dataloader), n_correct / n_samples

def plot_curves(train_losses, train_accs, val_losses, val_accs, lrs, model_name):
    """Save a 3-panel training figure to results/<model_name>_training_curves.png.

    Panels: validation loss, validation accuracy, and the LR schedule
    (log-scaled y-axis). `train_losses`/`train_accs` are accepted but not
    plotted (train curves were deliberately disabled); they are kept for
    interface compatibility.
    """
    xs = range(1, len(val_losses) + 1)
    plt.figure(figsize=(15, 5))

    # (subplot index, series, color, line label, title, y-label, log y-axis)
    panels = [
        (1, val_losses, 'r', 'Val Loss', 'Loss Curves', 'Loss', False),
        (2, val_accs, 'r', 'Val Acc', 'Accuracy Curves', 'Accuracy', False),
        (3, lrs, 'g', 'Learning Rate', 'Learning Rate Schedule', 'LR', True),
    ]
    for idx, series, color, label, title, ylabel, log_y in panels:
        plt.subplot(1, 3, idx)
        plt.plot(xs, series, color, label=label)
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        if log_y:
            plt.yscale('log')
        plt.legend()

    os.makedirs("results", exist_ok=True)
    plt.tight_layout()
    plt.savefig(f"results/{model_name}_training_curves.png", dpi=300)
    plt.close()

def profile_model(model, device, model_name):
    """Report parameter count, FLOPs and single-forward latency.

    Uses thop.profile on a 1x3x32x32 probe for params/MACs, then times one
    forward pass over a 128-image batch. Returns (params, macs, seconds),
    where params/macs are thop clever_format strings.
    """
    probe = torch.randn(1, 3, 32, 32).to(device)
    macs, params = profile(model, inputs=(probe, ), verbose=False)
    macs, params = clever_format([macs, params], "%.3f")

    model.eval()
    with torch.no_grad():
        batch = torch.randn(128, 3, 32, 32).to(device)
        # Synchronize around the timed region so queued CUDA kernels
        # don't leak into (or out of) the measurement.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        t0 = time.time()
        _ = model(batch)
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        infer_time = time.time() - t0

    print(f" [{model_name}] Params: {params}, FLOPs: {macs}, Inference time: {infer_time:.4f}s")
    return params, macs, infer_time

def get_model(model_name, num_classes):
    """Instantiate the requested architecture with `num_classes` outputs.

    Raises ValueError for names that are not supported.
    """
    if model_name == "GhostOnlyResNet":
        # Imported lazily so the module is only required when selected.
        from ghost_only_resnet import resnet50
        return resnet50(num_classes=num_classes)
    if model_name == "HorNet":
        return hornet_tiny_7x7(num_classes=num_classes)
    if model_name == "HorNet_GF":
        return hornet_tiny_gf(num_classes=num_classes)
    raise ValueError(f"Unknown model: {model_name}")

class LabelSmoothingCrossEntropy(nn.Module):
    """Cross-entropy loss with label smoothing.

    Computes (1 - smoothing) * NLL(target) + smoothing * mean_k(-log p_k),
    averaged over the batch — i.e. the target distribution is a mixture of
    the one-hot label and the uniform distribution.
    """

    def __init__(self, smoothing=0.1):
        super().__init__()
        self.smoothing = smoothing

    def forward(self, x, target):
        log_probs = F.log_softmax(x, dim=-1)
        # Negative log-likelihood of the true class per sample.
        nll = -log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        # Uniform component: mean negative log-prob over all classes.
        uniform = -log_probs.mean(dim=-1)
        return ((1.0 - self.smoothing) * nll + self.smoothing * uniform).mean()

def train_epoch(model, trainloader, optimizer, criterion, device, epoch, scheduler=None):
    """Train the model for one epoch; return (mean batch loss, train accuracy).

    If `scheduler` carries a truthy `step_every_batch` attribute (the caller
    tags OneCycleLR this way), scheduler.step() is invoked after every batch.
    Gradients are clipped to a max norm of 1.0 before each optimizer step.
    """
    model.train()
    loss_total = 0.0
    n_correct, n_seen = 0, 0

    progress = tqdm(trainloader, desc=f'Epoch {epoch+1}')
    for step, (inputs, targets) in enumerate(progress):
        inputs = inputs.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()

        # Gradient clipping to stabilize training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()

        # Per-batch LR scheduling (e.g. OneCycleLR tagged by the caller).
        if scheduler is not None and getattr(scheduler, 'step_every_batch', False):
            scheduler.step()

        loss_total += loss.item()
        preds = outputs.argmax(dim=1)
        n_seen += targets.size(0)
        n_correct += (preds == targets).sum().item()

        # Refresh the progress bar every 50 batches.
        if step % 50 == 0:
            progress.set_postfix({
                'Loss': f'{loss_total/(step+1):.4f}',
                'Acc': f'{100.*n_correct/n_seen:.2f}%',
                'LR': f'{optimizer.param_groups[0]["lr"]:.6f}'
            })

    return loss_total / len(trainloader), n_correct / n_seen

def train(model_name):
    """Full training pipeline for one model on CIFAR-100.

    Builds loaders and the model, re-initializes weights, then trains for up
    to 100 epochs with AdamW + OneCycleLR (stepped per batch inside
    train_epoch) and label smoothing. Checkpoints the best validation
    accuracy to results/best_<cls>.pth, applies early stopping, and saves
    loss/accuracy/LR curves as a PNG.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device:", device)
    
    # Training hyperparameters.
    num_epochs = 100
    batch_size = 128  # smaller batch -> more frequent gradient updates
    base_lr = 0.000322   # moderate base LR — presumably from an earlier find_lr run; TODO confirm
    weight_decay = 1e-4

    trainloader, testloader = get_data_loaders(batch_size)
    model = get_model(model_name, num_classes=100).to(device)
    
    # Re-initialize weights: Kaiming normal for conv/linear, unit-gain for
    # norm layers. Note this overrides any initialization done by the model
    # constructor itself.
    def init_weights(m):
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.BatchNorm2d, nn.LayerNorm)):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
    
    model.apply(init_weights)
    
    model_name_str = model.__class__.__name__

    # Mild label smoothing.
    criterion = LabelSmoothingCrossEntropy(smoothing=0.05)
    
    # AdamW optimizer.
    optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=weight_decay, betas=(0.9, 0.999))
    
    # One-cycle LR schedule; stepped once per batch (see train_epoch).
    scheduler = OneCycleLR(
        optimizer,
        max_lr=base_lr * 10,  # peak LR is 10x the base LR
        epochs=num_epochs,
        steps_per_epoch=len(trainloader),
        pct_start=0.2,  # 20% of the schedule is warmup
        anneal_strategy='cos',
        div_factor=10,  # initial lr = max_lr / div_factor
        final_div_factor=100  # final lr = max_lr / final_div_factor
    )
    scheduler.step_every_batch = True  # flag consumed by train_epoch: call step() every batch
    # optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=weight_decay)
    # scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=1e-5)

    best_acc = 0
    train_losses, train_accs = [], []
    val_losses, val_accs = [], []
    learning_rates = []
    patience = 20  # epochs without val-accuracy improvement before early stop
    patience_counter = 0

    print("Profiling model...")
    params, flops, infer_time = profile_model(model, device, model_name_str)

    for epoch in range(num_epochs):
        # Record the LR at the start of the epoch (it changes every batch).
        current_lr = optimizer.param_groups[0]['lr']
        learning_rates.append(current_lr)
        
        # Train one epoch.
        train_loss, train_acc = train_epoch(model, trainloader, optimizer, criterion, device, epoch, scheduler)
        
        # Validate.
        val_loss, val_acc = evaluate(model, testloader, device)
        
        train_losses.append(train_loss)
        train_accs.append(train_acc)
        val_losses.append(val_loss)
        val_accs.append(val_acc)

        print(f"[{model_name_str}] Epoch {epoch+1}: LR={current_lr:.6f}, "
              f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, "
              f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}")

        # Checkpoint on best validation accuracy.
        if val_acc > best_acc:
            best_acc = val_acc
            patience_counter = 0
            os.makedirs("results", exist_ok=True)
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'best_acc': best_acc,
            }, f"results/best_{model_name_str}.pth")
            print(f" ✓ Best model saved with acc: {best_acc:.4f}")
        else:
            patience_counter += 1

        # Early stopping — only after epoch 40, to give the one-cycle
        # schedule time to warm up and anneal.
        if patience_counter >= patience and epoch > 40:
            print(f"Early stopping at epoch {epoch+1}")
            break

    plot_curves(train_losses, train_accs, val_losses, val_accs, learning_rates, model_name_str)
    print(f"Training completed. Best validation accuracy: {best_acc:.4f}")

def find_lr(model, trainloader, optimizer, criterion, device, init_value=1e-6, final_value=1., beta=0.98):
    """Exponential LR range test (Smith-style LR finder).

    Sweeps the learning rate geometrically from `init_value` toward
    `final_value` over one pass of `trainloader`, tracking a bias-corrected
    exponentially smoothed loss, and stops early once the loss diverges
    (> 4x the best seen). Saves the loss-vs-log10(lr) curve to
    results/lr_finder_curve.png and returns a conservative suggestion:
    one decade below the minimum-loss LR.

    NOTE: the model is trained during the sweep, so its weights are
    perturbed; re-create the model before real training.
    """
    num = len(trainloader) - 1
    # Per-batch multiplicative step so lr reaches final_value on the last batch.
    mult = (final_value / init_value) ** (1/num)
    lr = init_value
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    avg_loss = 0.
    best_loss = float('inf')
    batch_num = 0
    losses = []
    log_lrs = []

    model.train()
    for inputs, targets in tqdm(trainloader, desc=" Finding LR"):
        batch_num += 1

        inputs, targets = inputs.to(device), targets.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # Exponentially smoothed loss with bias correction (Adam-style).
        avg_loss = beta * avg_loss + (1 - beta) * loss.item()
        smoothed_loss = avg_loss / (1 - beta ** batch_num)

        # Track the minimum smoothed loss seen so far.
        if smoothed_loss < best_loss or batch_num == 1:
            best_loss = smoothed_loss

        # Stop early once the loss blows up.
        if smoothed_loss > 4 * best_loss:
            break

        # Record before stepping, so each point pairs loss with the LR it used.
        losses.append(smoothed_loss)
        log_lrs.append(math.log10(lr))

        loss.backward()
        optimizer.step()

        lr *= mult
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    # LR at the minimum of the smoothed-loss curve.
    min_loss_idx = losses.index(min(losses))
    suggested_lr = 10**log_lrs[min_loss_idx]
    
    # Recommend one order of magnitude below the min-loss LR for safety.
    final_suggested_lr = suggested_lr / 10
    
    print(f" Suggested LR (min loss): {suggested_lr:.6f}")
    print(f" Recommended LR (safer): {final_suggested_lr:.6f}")

    # Plot the sweep curve with both LR markers.
    plt.figure(figsize=(10, 6))
    plt.plot(log_lrs, losses)
    plt.axvline(x=math.log10(suggested_lr), color='r', linestyle='--', label=f'Min Loss LR: {suggested_lr:.6f}')
    plt.axvline(x=math.log10(final_suggested_lr), color='g', linestyle='--', label=f'Recommended LR: {final_suggested_lr:.6f}')
    plt.xlabel("Log Learning Rate")
    plt.ylabel("Smoothed Loss")
    plt.title("Learning Rate Finder")
    plt.legend()
    plt.grid(True, alpha=0.3)
    os.makedirs("results", exist_ok=True)
    plt.savefig("results/lr_finder_curve.png", dpi=300)
    plt.close()
    print(" Saved LR curve to results/lr_finder_curve.png")
    
    return final_suggested_lr

def set_bn_eval(m):
    """Switch `m` to eval mode iff it is a 2-D batch-norm layer.

    Intended for `model.apply(set_bn_eval)` so BN running statistics are
    frozen (e.g. during the LR range test). Other modules are untouched.
    """
    if not isinstance(m, nn.BatchNorm2d):
        return
    m.eval()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Only list models that get_model() can actually build. The previous
    # choices included VanillaResNet / GhostHornetResNet / HornetOnlyResNet,
    # which passed argparse validation and then crashed inside get_model
    # with "Unknown model: ...".
    parser.add_argument('--model', type=str, default='HorNet',
                        choices=['GhostOnlyResNet', 'HorNet', 'HorNet_GF'],
                        help='选择模型结构')
    parser.add_argument('--find_lr', action='store_true', help='是否执行学习率搜索')
    args = parser.parse_args()

    if args.find_lr:
        # LR range test: sweep LRs on a fresh model and print a suggestion.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        trainloader, _ = get_data_loaders(batch_size=128)
        model = get_model(args.model, num_classes=100).to(device)
        # Freeze BN statistics so the sweep does not pollute running stats.
        model.apply(set_bn_eval)
        optimizer = optim.AdamW(model.parameters(), lr=1e-5, weight_decay=1e-4)
        criterion = nn.CrossEntropyLoss()
        optimal_lr = find_lr(model, trainloader, optimizer, criterion, device, init_value=1e-6, final_value=1.0)
        print(f"\n建议在train函数中设置base_lr = {optimal_lr:.6f}")
    else:
        train(args.model)