import torch
from torch import nn
from utils import datasets
from torchvision import transforms
from torch.utils.data import DataLoader


def get_loader(args):
    """Build train/test DataLoaders for the dataset named by ``args.dataset``.

    Supported datasets: ``'cub'``, ``'dogs'``, ``'aircrafts'``. All three use
    the same eval-style preprocessing: resize so a center crop of
    ``args.image_size`` covers 87.5% of the shorter side, convert to tensor,
    and normalize with ImageNet statistics.

    Args:
        args: namespace providing ``dataset``, ``data_root``, ``image_size``
            and ``batch_size``.

    Returns:
        Tuple ``(train_loader, test_loader)`` of ``DataLoader`` instances.

    Raises:
        ValueError: if ``args.dataset`` is not one of the supported names.
    """
    # One shared transform — the three dataset branches previously duplicated
    # this pipeline verbatim. ImageNet mean/std are the de-facto standard for
    # models pretrained on ImageNet.
    data_transforms = transforms.Compose([
        transforms.Resize(int(args.image_size / 0.875)),
        transforms.CenterCrop(args.image_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Name -> dataset class dispatch replaces the copy-pasted if/elif chain.
    dataset_classes = {
        'cub': datasets.CUB,
        'dogs': datasets.Dogs,
        'aircrafts': datasets.Aircrafts,
    }
    try:
        dataset_cls = dataset_classes[args.dataset]
    except KeyError:
        # Bug fix: the original raised "no dataset called cub" no matter
        # which unknown name was passed; report the actual offender.
        raise ValueError(f"no dataset called {args.dataset}") from None

    trainset = dataset_cls(data_path=args.data_root, train=True, transform=data_transforms)
    testset = dataset_cls(data_path=args.data_root, train=False, transform=data_transforms)

    train_loader = DataLoader(trainset,
                              shuffle=True,
                              batch_size=args.batch_size,
                              num_workers=4,
                              drop_last=True,
                              # pin_memory speeds up host->GPU transfers at
                              # the cost of extra pinned host RAM.
                              pin_memory=True)
    test_loader = DataLoader(testset,
                             shuffle=True,
                             batch_size=args.batch_size,
                             num_workers=4,
                             pin_memory=True)

    return train_loader, test_loader


def get_optim(args, model):
    """Create an optimizer for *model* as selected by ``args.optim``.

    When ``args.lr`` is unset/falsy, the learning rate follows the linear
    scaling rule ``0.1 * batch_size / 256``.

    Raises:
        Exception: if ``args.optim`` names an unsupported optimizer.
    """
    chosen_lr = args.lr if args.lr else (args.batch_size / 256) * 0.1

    if args.optim == 'sgd':
        return torch.optim.SGD(model.parameters(), lr=chosen_lr, momentum=0.9, weight_decay=5e-4)
    if args.optim == 'adam':
        return torch.optim.Adam(model.parameters(), lr=chosen_lr)
    raise Exception(f"there is no such optimizer named {args.optim}")


def get_lr_scheduler(args, optimizer):
    """Return the LR scheduler named by ``args.lr_scheduler``, or ``None``.

    Supported names: ``'exp'`` (ExponentialLR, gamma=0.95) and ``'cos'``
    (CosineAnnealingLR over ``args.epoch`` steps). A falsy name disables
    scheduling.

    Raises:
        Exception: if the name is set but not recognized.
    """
    name = args.lr_scheduler
    if not name:
        return None
    if name == 'exp':
        return torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
    if name == 'cos':
        return torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epoch)
    raise Exception(f'no such lr scheduler named {args.lr_scheduler}')


def get_loss_fn(args):
    """Return the loss criterion selected by ``args.loss_fn``.

    ``'crossentropy'`` uses label smoothing of 0.1; ``'mse'`` is plain MSE.

    Raises:
        Exception: if ``args.loss_fn`` is not a supported name.
    """
    name = args.loss_fn
    if name == 'crossentropy':
        return nn.CrossEntropyLoss(label_smoothing=0.1)
    if name == 'mse':
        return nn.MSELoss()
    raise Exception(f"there is no such loss function named {args.loss_fn}")
