import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import pickle
from torchvision.transforms import RandAugment
from torchvision.transforms import AutoAugment, AutoAugmentPolicy
import os



def show_graph(loss_record, graph_name = 'Training loss', y_name = 'Loss'):
    """Plot a recorded metric sequence against its 1-based record index."""
    record_idx = [k + 1 for k in range(len(loss_record))]
    plt.figure(figsize=(10, 5))
    plt.plot(record_idx, loss_record, 'b', label=graph_name)
    plt.title(graph_name)
    plt.xlabel('per_recorded')
    plt.ylabel(y_name)
    plt.legend()
    plt.show()

def cal_acc(testloader, net, device):
  """Return top-1 accuracy of `net` over `testloader` as a fraction in [0, 1].

  Each element of `testloader` is an (images, labels) pair; both are moved
  to `device` before the forward pass.
  """
  hits = 0
  seen = 0
  # Inference only: no gradients needed.
  with torch.no_grad():
    for batch in testloader:
      images = batch[0].to(device)
      labels = batch[1].to(device)
      logits = net(images)
      # The class with the largest logit is the prediction.
      preds = logits.argmax(dim=1)
      seen += labels.size(0)
      hits += (preds == labels).sum().item()
  return hits / seen

def show_all_class_acc(classes, testloader, net, device):
    """Print overall accuracy, then per-class accuracy percentages.

    Args:
        classes: sequence of class names indexed by label id.
        testloader: iterable of (images, labels) batches.
        net: model producing per-class logits.
        device: device to move each batch to.
    """
    print(cal_acc(testloader, net, device))
    # Prepare to count predictions for each class.
    correct_pred = {classname: 0 for classname in classes}
    total_pred = {classname: 0 for classname in classes}
    # Again, no gradients needed for evaluation.
    with torch.no_grad():
        for data in testloader:
            images, labels = data[0].to(device), data[1].to(device)
            outputs = net(images)
            _, predictions = torch.max(outputs, 1)
            # Collect the correct predictions for each class.
            for label, prediction in zip(labels, predictions):
                if label == prediction:
                    correct_pred[classes[label]] += 1
                total_pred[classes[label]] += 1
    # Print accuracy for each class.
    for classname, correct_count in correct_pred.items():
        total = total_pred[classname]
        if total == 0:
            # BUG FIX: a class with no samples in the loader previously
            # caused a ZeroDivisionError.
            print(f'Accuracy for class: {classname:5s} has no samples')
            continue
        accuracy = 100 * float(correct_count) / total
        print(f'Accuracy for class: {classname:5s} is {accuracy:.1f} %')




def train(trainloader, net, optimizer, criterion, device, epoch_num = 3, save_model = False, path = None, testloader = None):
  """Run a standard supervised training loop.

  Args:
      trainloader: iterable of (inputs, labels) batches.
      net: model to optimize (expected to already be on `device`).
      optimizer: torch optimizer over net's parameters.
      criterion: loss function, e.g. CrossEntropyLoss.
      device: device each batch is moved to.
      epoch_num: number of passes over the data.
      save_model: if True, save a state-dict checkpoint after each epoch.
      path: directory for checkpoints (current directory when None).
      testloader: optional loader; overall accuracy is printed per epoch
          when given.

  Returns:
      (loss_record, net): the average loss per 2000-minibatch window, and
      the trained model (same object, returned for convenience).
  """
  loss_record = []
  for epoch in range(epoch_num):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
      # Get the inputs; data is a list of [inputs, labels], moved to device.
      inputs, labels = data[0].to(device), data[1].to(device)

      # Zero the parameter gradients.
      optimizer.zero_grad()

      # Forward + backward + optimize.
      outputs = net(inputs)
      loss = criterion(outputs, labels)
      loss.backward()
      optimizer.step()

      # Print statistics every 2000 mini-batches.
      running_loss += loss.item()
      if i % 2000 == 1999:
        avg_loss = running_loss / 2000
        print(f'[{epoch + 1}, {i + 1:5d}] loss: {avg_loss:.3f}')
        # BUG FIX: record the same averaged value that is printed; the
        # original appended the raw 2000-batch sum, so plotted values
        # disagreed in scale with the printed ones.
        loss_record.append(avg_loss)
        running_loss = 0.0

    if save_model:
      name = f'model_loss_{running_loss:.4f}.pth'
      filename = os.path.join(path, name) if path is not None else name
      torch.save(net.state_dict(), filename)
      print(f'{name} saved, now acc:')
    if testloader is not None:
      print(cal_acc(testloader, net, device), '\n\n')

  print('Finished Training')
  return loss_record, net

def unpickle(file):
    """Load and return a pickled object (e.g. a CIFAR meta/batch dict).

    Uses latin1 encoding, as required for the Python-2-era CIFAR pickles.

    SECURITY NOTE: pickle.load can execute arbitrary code; only call this
    on trusted files (here: the locally downloaded CIFAR archive).
    """
    with open(file, 'rb') as fo:
        # Renamed from `dict` to avoid shadowing the builtin.
        data = pickle.load(fo, encoding='latin1')
    return data

def get_strong_dataLoader(batch_size):
    """Build CIFAR-100 loaders with heavy train-time augmentation.

    Returns (trainloader, testloader, classes), where `classes` is the
    tuple of fine label names read from the CIFAR-100 meta file.
    """
    norm_stats = ((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    train_tf = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(20),
        transforms.RandomResizedCrop(32, scale=(0.8, 1.0)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
        transforms.ToTensor(),
        transforms.Normalize(*norm_stats),
        # Applied after ToTensor: erases a random patch of the tensor image.
        transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3)),
    ])
    test_tf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(*norm_stats),
    ])

    train_set = torchvision.datasets.CIFAR100(root='./', train=True,
                                              download=True, transform=train_tf)
    test_set = torchvision.datasets.CIFAR100(root='./', train=False,
                                             download=True, transform=test_tf)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                               shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size,
                                              shuffle=False, num_workers=2)

    meta = unpickle("./cifar-100-python/meta")
    return train_loader, test_loader, tuple(meta['fine_label_names'])

def get_dataLoader(batch_size):
    """Build plain CIFAR-100 loaders (tensor conversion + normalization only).

    Returns (trainloader, testloader, classes), where `classes` is the
    tuple of fine label names read from the CIFAR-100 meta file.
    """
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    train_set = torchvision.datasets.CIFAR100(root='./', train=True,
                                              download=True, transform=preprocess)
    test_set = torchvision.datasets.CIFAR100(root='./', train=False,
                                             download=True, transform=preprocess)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                               shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size,
                                              shuffle=False, num_workers=2)

    meta = unpickle("./cifar-100-python/meta")
    return train_loader, test_loader, tuple(meta['fine_label_names'])

def get_dataLoader_RandAugment(batch_size):
    """Build CIFAR-100 loaders with RandAugment train-time augmentation.

    Returns (trainloader, testloader, classes), where `classes` is the
    tuple of fine label names read from the CIFAR-100 meta file.
    """
    # Per-channel CIFAR-100 training-set mean/std.
    cifar100_mean = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
    cifar100_std = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        RandAugment(),
        transforms.ToTensor(),
        transforms.Normalize(cifar100_mean, cifar100_std)
    ])

    # BUG FIX: the test transform previously normalized with (0.5, 0.5, 0.5)
    # while training used CIFAR-100 statistics; evaluation must preprocess
    # exactly as training did.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cifar100_mean, cifar100_std)
    ])

    trainset = torchvision.datasets.CIFAR100(root='./', train=True,
                                             download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                              shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR100(root='./', train=False,
                                            download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                             shuffle=False, num_workers=2)

    meta = unpickle("./cifar-100-python/meta")
    classes = tuple(meta['fine_label_names'])
    return trainloader, testloader, classes

def get_strong_dataLoader_RandAugment(batch_size):
    """Build CIFAR-100 loaders combining RandAugment with extra hand-picked
    augmentations (flip, rotation, color jitter, resized crop, blur).

    Returns (trainloader, testloader, classes), where `classes` is the
    tuple of fine label names read from the CIFAR-100 meta file.
    """
    # Per-channel CIFAR-100 training-set mean/std.
    cifar100_mean = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
    cifar100_std = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)

    transform_train = transforms.Compose([
        RandAugment(),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(degrees=15),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
        transforms.RandomResizedCrop(size=32, scale=(0.8, 1.0)),
        transforms.GaussianBlur(kernel_size=3),
        transforms.ToTensor(),
        transforms.Normalize(cifar100_mean, cifar100_std)
    ])

    # BUG FIX: the test transform previously normalized with (0.5, 0.5, 0.5)
    # while training used CIFAR-100 statistics; evaluation must preprocess
    # exactly as training did.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cifar100_mean, cifar100_std)
    ])

    trainset = torchvision.datasets.CIFAR100(root='./', train=True,
                                             download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                              shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR100(root='./', train=False,
                                            download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                             shuffle=False, num_workers=2)

    meta = unpickle("./cifar-100-python/meta")
    classes = tuple(meta['fine_label_names'])
    return trainloader, testloader, classes


def kk_new_plot_training_results(train_losses, train_top1_accs, train_top5_accs, val_losses, val_top1_accs, val_top5_accs):
    """Draw side-by-side loss and accuracy curves for train/validation."""
    plt.figure(figsize=(15, 5))

    # Left panel: loss curves.
    plt.subplot(1, 2, 1)
    for series, curve_label in ((train_losses, 'Training Loss'),
                                (val_losses, 'Validation Loss')):
        plt.plot(series, label=curve_label)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training/Validation Loss Curve')
    plt.legend()

    # Right panel: top-1 and top-5 accuracy curves.
    plt.subplot(1, 2, 2)
    for series, curve_label in ((train_top1_accs, 'Training Top-1 Accuracy'),
                                (val_top1_accs, 'Validation Top-1 Accuracy'),
                                (train_top5_accs, 'Training Top-5 Accuracy'),
                                (val_top5_accs, 'Validation Top-5 Accuracy')):
        plt.plot(series, label=curve_label)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.title('Training/Validation Accuracy Curve')
    plt.legend()

    plt.tight_layout()
    plt.show()

def new_train(model, trainloader, criterion, optimizer, device):
    """Train `model` for one epoch.

    Args:
        model: torch module producing per-class logits (>= 5 classes for
            the top-5 metric).
        trainloader: iterable of (data, target) batches.
        criterion: loss function.
        optimizer: optimizer over model's parameters.
        device: device each batch is moved to.

    Returns:
        (avg_loss, top1_acc_percent, top5_acc_percent).
    """
    model.train()
    train_loss = 0.0
    correct = 0
    top5_correct = 0
    total = 0
    num_batches = 0
    for data, target in trainloader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        num_batches += 1
        train_loss += loss.item()
        _, predicted = output.max(1)
        total += target.size(0)
        correct += predicted.eq(target).sum().item()
        # Top-5: a sample counts if its target is among the 5 largest logits.
        _, top5_predicted = output.topk(5, 1)
        top5_correct += top5_predicted.eq(target.view(-1, 1).expand_as(top5_predicted)).sum().item()
    if num_batches == 0:
        # BUG FIX: an empty loader previously raised NameError (batch_idx
        # unbound) and then ZeroDivisionError.
        return 0.0, 0.0, 0.0
    return train_loss / num_batches, 100. * correct / total, 100. * top5_correct / total

def test(model,testloader,criterion,device):
    model.eval()
    test_loss=0
    correct=0
    top5_correct=0
    total=0
    with torch.no_grad():
        for batch_idx,(data,target) in enumerate(testloader):
            data,target=data.to(device),target.to(device)
            output=model(data)
            loss=criterion(output,target)
            test_loss+=loss.item()
            _,predicted=output.max(1)
            total+=target.size(0)
            correct+=predicted.eq(target).sum().item()
            _,top5_predicted=output.topk(5,1)
            top5_correct+=top5_predicted.eq(target.view(-1,1).expand_as(top5_predicted)).sum().item()
    return test_loss/(batch_idx+1),100.*correct/total,100.*top5_correct/total


def save_log(modelname, train_acc, test_acc, train_loss, test_loss, train_top5_correct, test_top5_correct, output_dir='../train_process'):
    """Write per-epoch metrics to <output_dir>/<modelname>/train_log.txt.

    All metric arguments are equal-length sequences indexed by epoch.
    A `modelname` of None falls back to 'unnamed model'.
    """
    if modelname is None:
        modelname = 'unnamed model'
    log_dir = os.path.join(output_dir, modelname)
    # makedirs(exist_ok=True) avoids the exists-then-create race of the
    # original os.path.exists check.
    os.makedirs(log_dir, exist_ok=True)
    with open(os.path.join(log_dir, "train_log.txt"), 'w') as f:
        for i in range(len(train_acc)):
            f.write('epoch:{} train_acc:{} test_acc:{} train_loss:{} test_loss:{} train_top5_correct:{} test_top5_correct:{}\n'.format(
                i, train_acc[i], test_acc[i], train_loss[i], test_loss[i], train_top5_correct[i], test_top5_correct[i]))

def save_model_info(modelname,model,train_acc,test_acc,train_loss,test_loss,train_top5_correct,test_top5_correct,learning_rate,epochnum,momentum,weight_decay,output_dir='../train_process'):
    """Write model complexity, hyper-parameters and final-epoch metrics to
    <output_dir>/<modelname>/model_info.txt.

    NOTE(review): `profile` is not imported anywhere in this file, so this
    function raises NameError as written — presumably `from thop import
    profile` is missing; confirm against the project's other modules.
    Also note the hard-coded 'cuda' device (will fail on CPU-only hosts)
    and the 1x3x32x32 dummy input (CIFAR-sized).
    """
    if modelname==None:
        modelname='unnamed model'
    output_dir=output_dir+'/'+modelname
    # Create directory if it doesn't exist.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(os.path.join(output_dir, "model_info.txt"),'w') as f:
        # Dummy CIFAR-sized input for the FLOPs count; device is hard-coded.
        input = torch.randn(1, 3, 32, 32).to('cuda')
        Flops, params = profile(model, inputs=(input,)) # macs
        f.write('Flops:{} params:{}\n'.format(Flops,params))
        f.write('learning_rate:{} epochnum:{} momentum:{} weight_decay:{}\n'.format(learning_rate,epochnum,momentum,weight_decay))
        # Only the final epoch's values ([-1]) are recorded here.
        f.write('train_acc:{} test_acc:{} train_loss:{} test_loss:{} train_top5_correct:{} test_top5_correct:{}\n'.format(train_acc[-1],test_acc[-1],train_loss[-1],test_loss[-1],train_top5_correct[-1],test_top5_correct[-1]))

def check_model_info(model):
    """Print FLOPs and parameter count for `model` on a 1x3x32x32 input.

    NOTE(review): `profile` is not imported anywhere in this file, so this
    function raises NameError as written — presumably `from thop import
    profile` is missing; confirm before use.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Dummy CIFAR-sized input on whichever device is available.
    input = torch.randn(1, 3, 32, 32).to(device)
    Flops, params = profile(model, inputs=(input,))  # macs
    print('Flops:{} params:{}\n'.format(Flops, params))

