from models import AlexNet, vgg, resnet, resnext, googlenet, attention, res2net
from torch.optim.lr_scheduler import _LRScheduler
from models.ResidualAttentionNetwork import residual_attention_network
import numpy as np

def getNetwork(neuralNetwork_name):
    """Build and return a freshly constructed model by (case-insensitive) name.

    Args:
        neuralNetwork_name: architecture key, e.g. 'vgg16' or 'RESNET50'.

    Returns:
        A newly instantiated (untrained) network module.

    Raises:
        ValueError: if the name does not match any supported architecture.
    """
    # Dispatch table of zero-argument factories. Lambdas defer both the
    # attribute lookup and the construction, so only the requested model
    # is ever built.
    factories = {
        'VGG11': lambda: vgg.vgg11_bn(),
        'VGG13': lambda: vgg.vgg13_bn(),
        'VGG16': lambda: vgg.vgg16_bn(),
        'VGG19': lambda: vgg.vgg19_bn(),
        'ALEXNET': lambda: AlexNet.alexnet(),
        'RESNET18': lambda: resnet.resnet18(),
        'RESNET34': lambda: resnet.resnet34(),
        'RESNET50': lambda: resnet.resnet50(),
        'RESNET101': lambda: resnet.resnet101(),
        'RESNET152': lambda: resnet.resnet152(),
        'RESNEXT50': lambda: resnext.resnext50(),
        'RESNEXT101': lambda: resnext.resnext101(),
        'RESNEXT152': lambda: resnext.resnext152(),
        'GOOGLENET': lambda: googlenet.googlenet(),
        'ATTENTION92': lambda: residual_attention_network.ResidualAttentionModel_92_32input_update(),
        'RES2NEXT29_6C_24W_4S': lambda: res2net.res2next29_6cx24wx4scale(),
        'RES2NEXT29_8C_25W_4S': lambda: res2net.res2next29_8cx25wx4scale(),
        'RES2NEXT29_6C_24W_4S_SE': lambda: res2net.res2next29_6cx24wx4scale_se(),
    }

    name = neuralNetwork_name.upper()
    factory = factories.get(name)
    if factory is None:
        # ValueError is a subclass of Exception, so existing callers that
        # caught the old generic Exception still work.
        raise ValueError("unsupported neural network: " + name)
    return factory()


class WarmUpLR(_LRScheduler):
    """Linear learning-rate warmup scheduler.

    For the first ``total_iters`` scheduler steps, the learning rate of
    every parameter group grows linearly from 0 toward its base value:
    ``lr = base_lr * step / total_iters``.

    Args:
        optimizer: the wrapped optimizer (e.g. SGD).
        total_iters: number of iterations in the warmup phase.
        last_epoch: index of the last step; -1 means start fresh.
    """

    def __init__(self, optimizer, total_iters, last_epoch=-1):
        self.total_iters = total_iters
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the warmed-up learning rate for each parameter group."""
        # The 1e-8 in the denominator guards against division by zero
        # when total_iters == 0.
        scale = self.last_epoch / (self.total_iters + 1e-8)
        return [scale * base for base in self.base_lrs]


def get_model_params(model):
    """Count a model's parameters.

    Args:
        model: any ``torch.nn.Module``.

    Returns:
        Tuple ``(total, trainable, non_trainable)`` of parameter element
        counts, as plain Python ints.
    """
    total = 0
    trainable = 0
    non_trainable = 0

    for param in model.parameters():
        # numel() returns the exact element count as an int. The previous
        # np.prod(param.size()) returned numpy scalars and, for 0-dim
        # parameters, np.prod(()) == 1.0 (float), silently turning the
        # running totals into floats.
        count = param.numel()
        total += count
        if param.requires_grad:
            trainable += count
        else:
            non_trainable += count

    return total, trainable, non_trainable


