import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential, BatchNorm2d, ReLU, AdaptiveAvgPool2d
from torchvision import transforms

def NCLASSES():
    """Return the number of classification categories (fixed at 10)."""
    num_categories = 10
    return num_categories

#############################################################
# Helper building blocks
#############################################################

class ResizeLayer(nn.Module):
    """Rescale a feature map's spatial dimensions (h, w) to a fixed size.

    Args:
        size: target (height, width) to interpolate to.
    """
    def __init__(self, size=(224, 224)):
        super(ResizeLayer, self).__init__()
        self.size = size

    def forward(self, x):
        # Bilinear resampling to the configured target size.
        resized = F.interpolate(x, size=self.size, mode='bilinear', align_corners=False)
        return resized

def VggBlock(num_convs, in_channels, out_channels):
    """Build one VGG stage: `num_convs` (3x3 conv + ReLU) pairs, then a 2x2 max-pool.

    Args:
        num_convs: number of conv+ReLU pairs; the stage ends with one MaxPool2d.
        in_channels: channel count of the incoming feature map.
        out_channels: channel count produced by every conv in the stage.
    Return:
        nn.Sequential containing the assembled layers.
    """
    modules = []
    channels = in_channels
    for _ in range(num_convs):
        modules += [
            nn.Conv2d(channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(),
        ]
        # After the first conv, subsequent convs map out_channels -> out_channels.
        channels = out_channels
    modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*modules)
## end def VggBlock
## end def VggBlock

class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        ''' Basic two-conv residual block (ResNet style).
        Args:
            in_channels: channel count of the incoming feature map
            out_channels: channel count produced by the block
        When the channel counts differ, the skip connection is projected
        through a conv + batch-norm so the element-wise sum is well defined.
        '''
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.activate = F.relu
        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut.add_module('sc_conv', nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False))
            self.shortcut.add_module('sc_bn', nn.BatchNorm2d(out_channels))

    def forward(self, x):
        out = self.activate(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(x)
        # BUG FIX: the result of the final activation was previously
        # discarded (`self.activate(out)` without assignment), so the
        # block returned the pre-activation sum. Assign it so the final
        # ReLU is actually applied.
        out = self.activate(out)
        return out

def NinBlock(in_channels, out_channels, kernel_size, strides, padding):
    """Network-in-Network stage.

    One spatial convolution followed by two 1x1 convolutions (each with
    ReLU); the 1x1 convs act as a per-pixel weighted sum across channels.

    Args:
        in_channels: input channel count.
        out_channels: output channel count (shared by all three convs).
        kernel_size: spatial kernel size of the first conv.
        strides: stride of the first conv.
        padding: padding of the first conv.
    """
    stage = [nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding), nn.ReLU()]
    for _ in range(2):
        stage.append(nn.Conv2d(out_channels, out_channels, kernel_size=1))
        stage.append(nn.ReLU())
    return nn.Sequential(*stage)

class Inception(nn.Module):
    def __init__(self, in_channels, c1, c2, c3, c4):
        """GoogLeNet-style Inception block with four parallel branches.

        Args:
            in_channels: input channel count.
            c1-c4: output channels of each branch; c2 and c3 are two-layer
                   branches, so each is given as a (reduce, expand) pair.
        Example:
            Inception(3, 1, (1, 2), (1, 2), 1)
            takes 3 input channels; the four branches emit 1/2/2/1 channels,
            so the block outputs 1+2+2+1 = 6 channels.
        """
        super(Inception, self).__init__()
        # Branch 1: a single 1x1 conv.
        self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
        # Branch 2: 1x1 reduce, then 3x3 conv.
        self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
        self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        # Branch 3: 1x1 reduce, then 5x5 conv.
        self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
        self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        # Branch 4: 3x3 max-pool, then 1x1 conv.
        self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)

    def forward(self, X):
        branch1 = F.relu(self.p1_1(X))
        branch2 = F.relu(self.p2_2(F.relu(self.p2_1(X))))
        branch3 = F.relu(self.p3_2(F.relu(self.p3_1(X))))
        branch4 = F.relu(self.p4_2(F.relu(self.p4_1(X))))
        # Stack the branch outputs along the channel axis.
        return torch.cat((branch1, branch2, branch3, branch4), dim=1)
## end class Inception
## end class Inception

#############################################################
# Model 1: LeNet (lenet)
#############################################################
def lenet():
    """Assemble a LeNet-5 style classifier for 3-channel images.

    Inputs are first resized to 96x96; the flattened feature size
    16*24*24 follows from the two halving pool stages (96 -> 48 -> 24).
    """
    num_classes = NCLASSES()
    layers = [
        # Spatial sizes follow h=(h+2p-k)/s+1, w=(w+2p-k)/s+1.
        ResizeLayer(size=(96, 96)),
        nn.Conv2d(in_channels=3, out_channels=6, kernel_size=(5, 5), stride=1, padding=2),
        nn.Sigmoid(),
        nn.AvgPool2d(kernel_size=(2, 2), stride=2),
        nn.Conv2d(in_channels=6, out_channels=16, kernel_size=(3, 3), stride=1, padding=1),
        nn.Sigmoid(),
        nn.AvgPool2d(kernel_size=(2, 2), stride=2),
        nn.Flatten(),
        nn.Linear(in_features=16 * 24 * 24, out_features=120),
        nn.Sigmoid(),
        nn.Linear(in_features=120, out_features=84),
        nn.Sigmoid(),
        nn.Linear(in_features=84, out_features=num_classes),
    ]
    return nn.Sequential(*layers)
## end def
## end def

#############################################################
# Examples of different loss functions
#############################################################

def cross_entropy_loss():
    """Return the functional cross-entropy loss (raw logits vs. class indices)."""
    return nn.functional.cross_entropy

def custom_nll_loss():
    ''' Build a hand-rolled negative log-likelihood loss.

    The returned module expects raw logits y_hat of shape (batch, C) and
    integer labels y of shape (batch) — a trailing singleton dimension is
    also accepted — and returns the mean NLL over the batch.
    '''
    class CustomNLLLoss(nn.Module):
        def __init__(self):
            super(CustomNLLLoss, self).__init__()
        def forward(self, y_hat, y):
            log_probs = F.log_softmax(y_hat, dim=-1)
            # BUG FIX: `y.squeeze()` collapsed a batch of one to a 0-d
            # tensor, so the later `unsqueeze(1)` raised. Flattening with
            # view(-1) keeps a 1-d label vector for every batch size.
            y = y.view(-1)
            # Pick each sample's log-probability at its label and average.
            loss = -torch.mean(log_probs.gather(1, y.unsqueeze(1)).squeeze(1))
            return loss
    loss_fn = CustomNLLLoss()
    return loss_fn

def image_label_mse_loss():
    ''' Build a loss that regresses log-probabilities onto one-hot labels via MSE.

    NOTE(review): the targets are 0/1 one-hot vectors while the predictions
    are log-softmax values in (-inf, 0]; presumably a soft-label experiment —
    confirm that log_softmax (rather than softmax) is intentional.
    '''
    num_classes = NCLASSES()
    class ImageLabelMSELoss(nn.Module):
        def __init__(self):
            super(ImageLabelMSELoss, self).__init__()
            self.criterion = nn.MSELoss()
        def forward(self, y_hat, y):
            log_probs = F.log_softmax(y_hat, dim=-1)  # (batch, C) log-probabilities
            # BUG FIX: one-hot encoding y.view(-1, 1) produced shape
            # (batch, 1, C), which broadcast against (batch, C) inside
            # MSELoss and gave a wrong result (plus a broadcast warning).
            # Flatten to (batch,) so the one-hot target is (batch, C).
            y_onehot = F.one_hot(y.view(-1), num_classes=num_classes)
            loss = self.criterion(log_probs, y_onehot.float())
            return loss
    loss_fn = ImageLabelMSELoss()
    return loss_fn

#####################################################################################
# API hooks to fill in: model, loss function, optimizer, learning-rate scheduler
#####################################################################################

def your_model():
    """API hook: build the model used by the trainer.

    Trainer usage:
        model = your_model()
        y_hat = model(x)
    where x is a batch of shape (batch, c, h, w) with c = 3.
    """
    return lenet()

def your_loss():
    """API hook: build the loss function used by the trainer.

    Trainer usage:
        loss_fn = your_loss()
        loss = loss_fn(y_hat, y)
    where y_hat is the model output and y are labels of shape (batch).
    Alternative losses can be swapped in via the commented lines below.
    """
    loss_fn = cross_entropy_loss()
    #loss_fn = custom_nll_loss()
    #loss_fn = image_label_mse_loss()
    return loss_fn

def your_optimizer(model):
    """API hook: Adam optimizer (lr=5e-4, L2 weight decay 1e-4)."""
    return torch.optim.Adam(model.parameters(), lr=0.0005, weight_decay=1e-4)

def your_scheduler(optimizer):
    """API hook: halve the learning rate every 3 scheduler steps."""
    return torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.5)


