# @Author : wangyuchen
# @Time : 2021-05-13 10:11


from torchvision import datasets
import sys
import numpy
# lr_scheduler handles learning-rate adjustment
from torch.optim.lr_scheduler import _LRScheduler
# transforms applies transformations to PIL.Image objects
import torchvision.transforms as transforms
# import the data loader
from torch.utils.data import DataLoader

from utils.val_dataset import ValDataset, TestDataset, TrainDataset


def get_network(args):
    """Build and return the network selected by ``args.net``.

    Args:
        args: parsed command-line arguments; reads ``args.net`` (the
            architecture name, e.g. ``'resnet18'``) and ``args.gpu``
            (move the model to CUDA when truthy).

    Returns:
        The instantiated model (a ``torch.nn.Module``).

    Exits the process with an error message when ``args.net`` names an
    unsupported architecture.
    """
    # Registry of supported architectures:
    #   name -> (module path, factory function, keyword arguments).
    # The module is imported lazily so that only the selected backbone's
    # code is loaded (same behavior as the original per-branch imports).
    registry = {
        'vgg11': ('models.vgg', 'vgg11_bn', {}),
        'vgg13': ('models.vgg', 'vgg13_bn', {}),
        'vgg16': ('models.vgg', 'vgg16_bn', {}),
        'vgg19': ('models.vgg', 'vgg19_bn', {}),
        'googlenet': ('models.googlenet', 'googlenet', {}),
        # torchvision backbones are loaded with ImageNet-pretrained weights
        # (the original passed the positional `True` for `pretrained`).
        'resnet18': ('torchvision.models.resnet', 'resnet18', {'pretrained': True}),
        'resnet34': ('torchvision.models.resnet', 'resnet34', {'pretrained': True}),
        'resnet50': ('torchvision.models.resnet', 'resnet50', {'pretrained': True}),
        'resnet101': ('torchvision.models.resnet', 'resnet101', {'pretrained': True}),
        'resnet152': ('torchvision.models.resnet', 'resnet152', {'pretrained': True}),
        'seresnet18': ('models.senet', 'seresnet18', {}),
        'seresnet34': ('models.senet', 'seresnet34', {}),
        'seresnet50': ('models.senet', 'seresnet50', {}),
        'seresnet101': ('models.senet', 'seresnet101', {}),
        'seresnet152': ('models.senet', 'seresnet152', {}),
    }

    try:
        module_name, factory_name, kwargs = registry[args.net]
    except KeyError:
        print('the network name you have entered is not supported yet')
        sys.exit()

    import importlib
    factory = getattr(importlib.import_module(module_name), factory_name)
    net = factory(**kwargs)

    if args.gpu:  # use_gpu
        net = net.cuda()

    return net


def get_training_dataloader(data_dir, mean, std, batch_size=16, num_workers=2, shuffle=True):
    """Return a DataLoader over the training set with data augmentation.

    Each image is resized to 224, randomly flipped and rotated, converted
    to a tensor and normalized with the supplied per-channel mean/std.

    Args:
        data_dir: root directory of the training images.
        mean: per-channel means used by ``transforms.Normalize``.
        std: per-channel standard deviations used by ``transforms.Normalize``.
        batch_size: samples per batch.
        num_workers: worker processes used by the DataLoader.
        shuffle: whether to reshuffle the data every epoch.
    """
    augmentation = transforms.Compose([
        transforms.Resize(224),
        transforms.RandomHorizontalFlip(),      # random horizontal flip
        transforms.RandomRotation(degrees=15),  # random rotation up to ±15°
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    train_set = TrainDataset(data_dir, augmentation)
    return DataLoader(train_set, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)


def get_validating_dataloader(data_dir, label_dir, mean, std, batch_size=16, num_workers=2, shuffle=True):
    """Return a DataLoader over the validation set (no augmentation).

    Args:
        data_dir: directory containing the validation images.
        label_dir: annotations file mapping images to labels.
        mean: per-channel means used by ``transforms.Normalize``.
        std: per-channel standard deviations used by ``transforms.Normalize``.
        batch_size: samples per batch.
        num_workers: worker processes used by the DataLoader.
        shuffle: whether to reshuffle the data every epoch.
    """
    preprocessing = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    val_set = ValDataset(annotations_file=label_dir, img_dir=data_dir, transform=preprocessing)
    return DataLoader(val_set, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)


def get_testing_dataloader(data_dir, mean, std, batch_size=16, num_workers=2, shuffle=True):
    """Return a DataLoader over the test set (no augmentation).

    Args:
        data_dir: directory containing the test images.
        mean: per-channel means used by ``transforms.Normalize``.
        std: per-channel standard deviations used by ``transforms.Normalize``.
        batch_size: samples per batch.
        num_workers: worker processes used by the DataLoader.
        shuffle: whether to reshuffle the data every epoch.
    """
    preprocessing = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    test_set = TestDataset(img_dir=data_dir, transform=preprocessing)
    return DataLoader(test_set, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)


def compute_mean_std(data_dir):
    """Compute the per-channel (R, G, B) mean and std of an image folder.

    Args:
        data_dir: root directory laid out for ``datasets.ImageFolder``
            (one sub-directory per class).

    Returns:
        A ``(mean, std)`` pair, each a 3-tuple of floats — one value per
        RGB channel, computed over every pixel of every transformed image.

    Bug fixes vs. the original:
      * ``dataset[i][1]`` indexed the integer *label*, not the image;
        the image is element 0 of the (image, label) pair.
      * After ``ToTensor`` the image is laid out (C, H, W), so channels
        are selected on axis 0, not on the last axis.
      * Each image is now read exactly once, so all three channels come
        from the same random crop (the original re-ran the random
        transforms per channel).
      * Dropped the pointless ``DataLoader(...).dataset`` round trip.
    """
    # NOTE(review): random crop/flip/rotation make the statistics slightly
    # non-deterministic; kept to match the original transform pipeline,
    # but a deterministic resize may be preferable — confirm with owner.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
    ])
    dataset = datasets.ImageFolder(data_dir, transform_train)
    # RandomCrop guarantees a uniform 32x32 size, so stacking is safe:
    # pixels has shape (N, C, H, W).
    pixels = numpy.stack([image.numpy() for image, _label in dataset])
    mean = tuple(float(pixels[:, c].mean()) for c in range(3))
    # Standard deviation per channel over all images and pixels.
    std = tuple(float(pixels[:, c].std()) for c in range(3))
    return mean, std


class WarmUpLR(_LRScheduler):
    """Linear warm-up learning-rate scheduler.

    During the first ``total_iters`` optimisation steps the learning rate
    ramps up linearly from ~0 toward each parameter group's base lr, so a
    freshly-initialised model can adapt its weights gently before training
    at the full rate.

    Args:
        optimizer: the wrapped optimizer (e.g. SGD).
        total_iters: total number of iterations in the warm-up phase.
        last_epoch: index of the last iteration; -1 starts from scratch.
    """

    def __init__(self, optimizer, total_iters, last_epoch=-1):
        self.total_iters = total_iters
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        """Scale every base lr by ``last_epoch / total_iters``."""
        # The tiny epsilon keeps the division safe if total_iters is 0.
        warm_factor = self.last_epoch / (self.total_iters + 1e-8)
        return [warm_factor * base_lr for base_lr in self.base_lrs]


if __name__ == '__main__':
    # Quick manual check: print the channel statistics of the training set.
    stats = compute_mean_std('../train')
    print(stats)
