import torch

"""
神经网络容器
"""
torch.nn.Module #所有神经网络模块的基类
torch.nn.Sequential(*args) #按顺序组合多个模块
torch.nn.ModuleList(modules) #将子模块存储在列表中
torch.nn.ModuleDict(modules) #将子模块存储在字典中
torch.nn.ParameterList(parameters) #将参数存储在列表中
torch.nn.ParamterDict(parameters) #将参数存储在字典中


"""
线性层
"""
torch.nn.Linear(infeatures,out_features) #全连接层
torch.nn.Bilinear(inl_features,in2_features,in3_features,out_features)#双线性层

"""
卷积层
"""
torch.nn.Conv1d(in_channels,out_channels,kernel_size) #一维卷积层
torch.nn.Conv2d(in_channels,out_channels,kernel_size)  #二维卷积层
torch.nn.Conv3d(in_channels,out_channels,kernel_size)  #三维卷积层
torch.nn.ConvTranspose1d(in_channels,out_channels,kernel_size) #一维转置卷积层
torch.nn.ConvTranspose2d(in_channels,out_channels,kernel_size) #二维转置卷积层
torch.nn.ConvTranspose3d(in_channels,out_channels,kernel_size) #三维转置卷积层

"""
池化层
"""
torch.nn.MaxPool1d(kernel_size) #一维最大池化层
torch.nn.MaxPool2d(kernel_size) #二维最大池化层
torch.nn.MaxPool3d(kernel_size) #三维最大池化层
torch.nn.AvgPool1d(kernel_size) #一位平均池化层
torch.nn.AvgPool2d(kernel_size) #二维平均池化层
torch.nn.AvgPool3d(kernel_size) #三维平均池化层
torch.nn.AdaptiveMaxPool1d(output_size) #一维自适应最大池化层
torch.nn.AdaptiveAvgPool1d(output_size) #一维自适应平均池化层
torch.nn.AdaptiveMaxPool2d(output_size) #二维自适应最大池化层
torch.nn.AdaptiveAvgPool2d(output_size) #二维自适应平均池化层
torch.nn.AdaptiveMaxPool3d(output_size) #三维自适应最大池化层
torch.nn.AdaptiveAvgPool3d(output_size) #三维自适应平均池化层

"""
激活函数
"""
torch.nn.ReLU() #relu激活函数
torch.nn.Sigmoid() #Sigmoid激活函数
torch.nn.Tanh() #Tanh激活函数
torch.nn.Softmax() #Softmax函数
torch.nn.LogSoftmax() #logsoftmax函数
torch.nn.LeakyReLU(negative_slope) #LeakyReLU激活函数
torch.nn.ELU(alpha) #ELU激活函数
torch.nn.SELU() #SELU激活函数
torch.nn.GELU() #GELU激活函数

"""
损失函数
"""
torch.nn.MSELoss() #均方误差损失
torch.nn.L1Loss() #L1损失
torch.nn.CrossEntropyLoss() #交叉熵损失
torch.nn.NLLLoss() #负对数似然损失。
torch.nn.BCELoss() #二分类交叉熵损失
torch.nn.BCEWithLogitsLoss() #带Sigmoid的二分类交叉熵损失
torch.nn.KLDivLoss() #KL散度损失。
torch.nn.HingeEmbeddingLoss() #铰链嵌入损失
torch.nn.MultiMarginLoss() #多分类间隔损失
torch.nn.SmoothL1Loss() #平滑L1损失

"""
归一化层
"""
torch.nn.BatchNorm1d(num_features) #一维批归一化层
torch.nn.BatchNorm2d(num_features) #二维批归一化层
torch.nn.BatchNorm3d(num_features) #三维批归一化层
torch.nn.LayerNorm(normalized_shape) #层归一化
torch.nn.InstanceNorm1d(num_features) #一维实例归一化层
torch.nn.InstanceNorm2d(num_features) #二维实例归一化层
torch.nn.InstanceNorm3d(num_features) #三维实例归一化层
torch.nn.GroupNorm(num_groups,num_channels) #组归一化

"""
循环神经网络层
"""
torch.nn.RNN(input_size,hidden_size) #简单RNN层
torch.nn.LSTM(input_size,hidden_size) #LSTM层
torch.nn.GRU(input_size,hidden_size) #GRU层
torch.nn.RNNCell(input_size,hidden_size) #简单RNN单元 
torch.nn.LSTMCell(input_size,hidden_size) #LSTM单元
torch.nn.GRUCell(input_size,hidden_size) #GRU单元

"""
嵌入层
"""
torch.nn.Embedding(num_embeddings,embedding_dim) #嵌入层

"""
Dropout层
"""
torch.nn.Dropout(p) #dropout层
torch.nn.Dropout2d(p) #2d dropout层
torch.nn.Dropout3d(p) #3d dropout层

"""
实用函数
"""
torch.nn.functional.relu(input) #应用ReLU函数激活
torch.nn.functional.sigmoid(input) #应用Sigmoid函数激活
torch.nn.functional.softmax(input,dim) #引用softmax激活函数
torch.nn.functional.cross_entropy(input,target) #计算交叉熵损失
torch.nn.functional.mse_loss(input,target) #计算均方差损失

"""
实例
"""

import torch
import torch.nn as nn
class SimpleNet(nn.Module):
    def __init__(self):
        super(SimpleNet,self).__init__()
        self.fc1=nn.Linear(10,20)
        self.rule=nn.ReLU()
        self.fc2=nn.Linear(20,1)
    def forward(self,x):
        x=self.fc1(x)
        x=self.relu(x)
        x=self.fc2(x)
        return x
model = SimpleNet()
x = torch.randn(5, 10)  # batch of 5 samples, 10 features each
output = model(x)       # renamed from `input`, which shadowed the builtin
print(output)