import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.models as models
import torchvision.transforms as transforms
from torchvision.transforms import RandAugment
from torch.optim import lr_scheduler
import numpy as np
import time
import copy
import matplotlib.pyplot as plt

def get_data_loader_plain():
    """Build CIFAR-100 train/test DataLoaders with no augmentation.

    Both splits share identical preprocessing: ``ToTensor`` followed by a
    simple (0.5, 0.5, 0.5) mean/std normalization.

    Returns:
        tuple: ``(trainloader, testloader)`` — batch size 128 each; the
        training loader shuffles, the test loader does not.
    """
    # Train and test use the exact same pipeline here, so build it once.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    trainset = torchvision.datasets.CIFAR100(
        root='./data', train=True, download=True, transform=preprocess)
    testset = torchvision.datasets.CIFAR100(
        root='./data', train=False, download=True, transform=preprocess)

    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=128, shuffle=True, num_workers=2)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=128, shuffle=False, num_workers=2)

    return trainloader, testloader

def get_data_loader():
    """Build CIFAR-100 train/test DataLoaders with train-time augmentation.

    The training split gets random crop (32 with padding 4), random
    horizontal flip and RandAugment; both splits are normalized with the
    CIFAR-100 per-channel mean/std so train and eval inputs share the
    same distribution.

    Returns:
        tuple: ``(trainloader, testloader)`` — batch size 128 each; the
        training loader shuffles, the test loader does not.
    """
    # CIFAR-100 per-channel statistics (training-set mean/std).
    mean = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
    std = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        RandAugment(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    # BUG FIX: the test transform previously normalized with
    # (0.5, 0.5, 0.5) while training used the dataset statistics above,
    # so evaluation saw a shifted input distribution. Use the same
    # statistics for both splits.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
                                             download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                              shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR100(root='./data', train=False,
                                            download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=128,
                                             shuffle=False, num_workers=2)

    return trainloader, testloader


def plot_training_results(train_losses, train_top1_accs, train_top5_accs, val_losses, val_top1_accs, val_top5_accs):
    """Plot loss and accuracy curves from a training run.

    Args:
        train_losses, val_losses: per-epoch loss values.
        train_top1_accs, val_top1_accs: per-epoch top-1 accuracies.
        train_top5_accs, val_top5_accs: per-epoch top-5 accuracies.
        Each argument is a sequence of floats or 0-dim torch tensors.

    Displays a figure with two subplots (loss, accuracy); returns None.
    """
    def _to_array(values):
        # BUG FIX: np.array() on a list of CUDA tensors raises; float()
        # works on plain numbers and on 0-dim tensors of any device.
        return np.asarray([float(v) for v in values])

    train_losses = _to_array(train_losses)
    train_top1_accs = _to_array(train_top1_accs)
    train_top5_accs = _to_array(train_top5_accs)
    val_losses = _to_array(val_losses)
    val_top1_accs = _to_array(val_top1_accs)
    val_top5_accs = _to_array(val_top5_accs)

    plt.figure(figsize=(15, 5))

    # Loss curves (left subplot).
    plt.subplot(1, 2, 1)
    plt.plot(train_losses, label='训练损失')
    plt.plot(val_losses, label='验证损失')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('训练/验证损失曲线')
    plt.legend()

    # Accuracy curves (right subplot).
    plt.subplot(1, 2, 2)
    plt.plot(train_top1_accs, label='训练 Top-1 准确率')
    plt.plot(val_top1_accs, label='验证 Top-1 准确率')
    plt.plot(train_top5_accs, label='训练 Top-5 准确率')
    plt.plot(val_top5_accs, label='验证 Top-5 准确率')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.title('训练/验证准确率曲线')
    plt.legend()

    plt.tight_layout()
    plt.show()


# 训练模型
def train_model(model, trainloader, device, testloader, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` and keep the weights with the best validation top-1 accuracy.

    Args:
        model: the network to train (already on `device` or movable inputs).
        trainloader: DataLoader for the training split.
        device: torch.device inputs/labels are moved to.
        testloader: DataLoader used as the validation split.
        criterion: loss function, e.g. CrossEntropyLoss.
        optimizer: optimizer stepped on each training batch.
        scheduler: LR scheduler stepped once per epoch after the train phase.
        num_epochs: number of epochs to run.

    Returns:
        (model, history): model loaded with the best validation weights, and
        a tuple (train_losses, train_top1_accs, train_top5_accs, val_losses,
        val_top1_accs, val_top5_accs) of per-epoch Python floats.
    """
    since = time.time()

    train_losses = []
    train_top1_accs = []
    train_top5_accs = []
    val_losses = []
    val_top1_accs = []
    val_top5_accs = []

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('第 {}/{} 轮训练'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training phase followed by a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
                loader = trainloader
            else:
                model.eval()
                loader = testloader

            running_loss = 0.0
            # Accumulate as Python ints (.item() below) so the history
            # lists hold plain floats even when training on GPU.
            # (BUG FIX: the original kept device tensors and also carried
            # an unused duplicate counter `running_corrects`.)
            running_top1_corrects = 0
            running_top5_corrects = 0

            for inputs, labels in loader:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Reset parameter gradients for this batch.
                optimizer.zero_grad()

                # Track gradient history only during training.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # Top-1 and top-5 correct counts for this batch.
                    _, top5_preds = torch.topk(outputs, 5, dim=1)
                    top1_correct = torch.sum(preds == labels.data).item()
                    top5_correct = torch.sum(top5_preds == labels.view(-1, 1)).item()

                    # Backpropagate + optimize only in the training phase.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Batch statistics (loss is the per-sample mean, so rescale).
                running_loss += loss.item() * inputs.size(0)
                running_top1_corrects += top1_correct
                running_top5_corrects += top5_correct

            if phase == 'train':
                scheduler.step()

            n_samples = len(loader.dataset)
            epoch_loss = running_loss / n_samples
            epoch_top1_acc = running_top1_corrects / n_samples
            epoch_top5_acc = running_top5_corrects / n_samples

            print('{} 损失: {:.4f} Top-1 准确率: {:.4f} Top-5 准确率: {:.4f}'.format(
                phase, epoch_loss, epoch_top1_acc, epoch_top5_acc))

            # Record per-epoch history.
            if phase == 'train':
                train_losses.append(epoch_loss)
                train_top1_accs.append(epoch_top1_acc)
                train_top5_accs.append(epoch_top5_acc)
            else:
                val_losses.append(epoch_loss)
                val_top1_accs.append(epoch_top1_acc)
                val_top5_accs.append(epoch_top5_acc)

            # Snapshot the weights whenever validation top-1 improves.
            if phase == 'val' and epoch_top1_acc > best_acc:
                best_acc = epoch_top1_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('训练完成，耗时 {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('最佳验证集准确率: {:4f}'.format(best_acc))

    # Restore the best-performing weights before returning.
    model.load_state_dict(best_model_wts)

    return model, (train_losses, train_top1_accs, train_top5_accs, val_losses, val_top1_accs, val_top5_accs)


