# -*- coding:utf-8 -*-#
# @Time:2023/5/31 14:46
# @Author:Adong
# @Software:PyCharm


import torch
from torch import nn
from torchvision import models
import torch.nn.functional as F


class MyLeNet5_NCL(nn.Module):
    """LeNet-5-style CNN combined with five base-learner heads for
    negative correlation learning (NCL) ensembling.

    A shared convolutional trunk (l1-l8) feeds five independent two-layer
    MLP heads (f1/f2 ... f9/f10); every head projects to class scores
    through the shared ``output`` layer. ``forward`` returns the five
    head outputs as a tuple.
    """

    def __init__(self, output_size):
        """
        :param output_size: number of classes each base learner predicts
        """
        super(MyLeNet5_NCL, self).__init__()
        self.Sigmoid = nn.Sigmoid()  # kept for interface compatibility; not used in forward
        self.ReLu = nn.ReLU()
        # Shared trunk: four conv + 2x2 max-pool stages. With 3x64x64 input the
        # spatial size after four poolings is 4x4, matching 256 * 4 * 4 below.
        self.l1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2)
        self.l2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
        self.l4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l5 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
        self.l6 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l7 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1)
        self.l8 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.output = nn.Linear(in_features=768, out_features=output_size)  # classifier shared by all heads
        # Base learner 1
        self.f1 = nn.Linear(in_features=256 * 4 * 4, out_features=2560)
        self.f2 = nn.Linear(in_features=2560, out_features=768)
        # Base learner 2
        self.f3 = nn.Linear(in_features=256 * 4 * 4, out_features=2560)
        self.f4 = nn.Linear(in_features=2560, out_features=768)
        # Base learner 3
        self.f5 = nn.Linear(in_features=256 * 4 * 4, out_features=2560)
        self.f6 = nn.Linear(in_features=2560, out_features=768)
        # Base learner 4
        self.f7 = nn.Linear(in_features=256 * 4 * 4, out_features=2560)
        self.f8 = nn.Linear(in_features=2560, out_features=768)
        # Base learner 5
        self.f9 = nn.Linear(in_features=256 * 4 * 4, out_features=2560)
        self.f10 = nn.Linear(in_features=2560, out_features=768)

    def forward(self, x):
        """Run the shared trunk, then all five base-learner heads.

        :param x: image batch; spatial size must flatten to 256 * 4 * 4
                  after four 2x2 poolings (e.g. (N, 3, 64, 64))
        :return: tuple of five (N, output_size) score tensors
        """
        x = self.ReLu(self.l1(x))  # conv 1
        x = self.l2(x)             # pool 1
        x = self.ReLu(self.l3(x))  # conv 2
        x = self.l4(x)             # pool 2
        x = self.ReLu(self.l5(x))  # conv 3
        x = self.l6(x)             # pool 3
        x = self.ReLu(self.l7(x))  # conv 4
        x = self.l8(x)             # pool 4
        x = x.view(-1, 256 * 4 * 4)  # flatten spatial features per sample
        # BUGFIX: the original computed output(f2(f1(x))) with no nonlinearity,
        # so each head collapsed to a single affine map and the 2560-unit hidden
        # layer was mathematically redundant. ReLU between the stacked Linear
        # layers restores the heads' representational capacity.
        x1 = self.output(self.ReLu(self.f2(self.ReLu(self.f1(x)))))  # base learner 1
        x2 = self.output(self.ReLu(self.f4(self.ReLu(self.f3(x)))))  # base learner 2
        x3 = self.output(self.ReLu(self.f6(self.ReLu(self.f5(x)))))  # base learner 3
        x4 = self.output(self.ReLu(self.f8(self.ReLu(self.f7(x)))))  # base learner 4
        x5 = self.output(self.ReLu(self.f10(self.ReLu(self.f9(x)))))  # base learner 5
        return x1, x2, x3, x4, x5


class MyLeNet5(nn.Module):
    """Small LeNet-5-style convolutional classifier.

    Four conv + 2x2 max-pool stages followed by two fully connected
    layers and a final linear classifier. The flatten size 32 * 14 * 14
    corresponds to a 224x224 RGB input reduced by four poolings.
    """

    def __init__(self, output_size):
        """
        :param output_size: number of output classes
        """
        super(MyLeNet5, self).__init__()
        self.Sigmoid = nn.Sigmoid()  # kept from original interface; unused in forward
        self.ReLu = nn.ReLU()
        # Convolutional trunk: channel widths 3 -> 4 -> 8 -> 16 -> 32,
        # each conv followed by a 2x2 max-pool in forward().
        self.l1 = nn.Conv2d(in_channels=3, out_channels=4, kernel_size=5, padding=2)
        self.l2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l3 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3, padding=1)
        self.l4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l5 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, padding=1)
        self.l6 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l7 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
        self.l8 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Fully connected classifier head.
        self.f1 = nn.Linear(in_features=32 * 14 * 14, out_features=2560)
        self.f2 = nn.Linear(in_features=2560, out_features=768)
        self.output = nn.Linear(in_features=768, out_features=output_size)

    def forward(self, x):
        """Map an image batch to raw class scores of shape (N, output_size)."""
        # Run the four conv(ReLU) + pool stages in order.
        for conv, pool in ((self.l1, self.l2), (self.l3, self.l4),
                           (self.l5, self.l6), (self.l7, self.l8)):
            x = pool(self.ReLu(conv(x)))
        flat = x.view(-1, 32 * 14 * 14)  # flatten feature maps per sample
        hidden = self.f2(self.f1(flat))
        # Raw scores; softmax (if needed) is left to the loss/caller.
        return self.output(hidden)


class ResNet50_NCL(nn.Module):
    """Pretrained ResNet-50 backbone with five parallel linear heads for
    negative correlation learning (NCL) ensembling.

    The backbone's own fc layer is kept; each head consumes its output
    (``resnet50.fc.out_features`` wide) after a ReLU.
    """

    def __init__(self, output_size):
        """
        :param output_size: number of classes produced by each head
        """
        super(ResNet50_NCL, self).__init__()
        # Backbone initialized with the default pretrained weights.
        self.resnet50 = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
        self.dropout = nn.Dropout(p=0.5)  # defined but not applied in forward (see note there)
        self.relu = nn.ReLU()
        in_dim = self.resnet50.fc.out_features
        # Five independent classification heads, one per base learner.
        self.f1 = nn.Linear(in_features=in_dim, out_features=output_size)
        self.f2 = nn.Linear(in_features=in_dim, out_features=output_size)
        self.f3 = nn.Linear(in_features=in_dim, out_features=output_size)
        self.f4 = nn.Linear(in_features=in_dim, out_features=output_size)
        self.f5 = nn.Linear(in_features=in_dim, out_features=output_size)

    def forward(self, x):
        """Return a 5-tuple of per-head class scores for the batch ``x``."""
        # NOTE: dropout was deliberately disabled by the original author
        # (training became much slower); self.dropout is retained unused.
        feats = self.relu(self.resnet50(x))
        heads = (self.f1, self.f2, self.f3, self.f4, self.f5)
        return tuple(head(feats) for head in heads)


class ResNet50(nn.Module):
    """Pretrained ResNet-50 followed by one extra fully connected layer.

    The backbone's fc output (``resnet50.fc.out_features`` wide) passes
    through a ReLU and a single linear head of size ``output_size``.
    """

    def __init__(self, output_size):
        """
        :param output_size: size of the final classification layer
        """
        super(ResNet50, self).__init__()
        self.resnet50 = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
        self.relu = nn.ReLU()
        self.f1 = nn.Linear(in_features=self.resnet50.fc.out_features,
                            out_features=output_size)

    def forward(self, x):
        """Map an image batch to class scores of width ``output_size``."""
        return self.f1(self.relu(self.resnet50(x)))


class AE(nn.Module):
    """Four-layer fully connected encoder network.

    Layer widths are given by the five size parameters; ReLU is applied
    after linear1, linear3 and linear4 (see note in ``forward``).
    """

    def __init__(self, l1_size, l2_size, l3_size, l4_size, l5_size):
        """
        :param l1_size: input layer size
        :param l2_size: second layer size
        :param l3_size: third layer size
        :param l4_size: fourth layer size
        :param l5_size: fifth (output) layer size
        """
        super(AE, self).__init__()
        self.ReLu = nn.ReLU()
        self.linear1 = nn.Linear(l1_size, l2_size)
        self.linear2 = nn.Linear(l2_size, l3_size)
        self.linear3 = nn.Linear(l3_size, l4_size)
        self.linear4 = nn.Linear(l4_size, l5_size)

    def forward(self, x):
        """Encode ``x`` through the four linear layers."""
        # NOTE(review): the original applies no activation after linear2;
        # that omission is reproduced here — confirm whether a linear
        # intermediate layer was intended.
        h = self.ReLu(self.linear1(x))
        h = self.linear2(h)
        h = self.ReLu(self.linear3(h))
        return self.ReLu(self.linear4(h))

class LSTM(nn.Module):
    """Stacked LSTM followed by a per-timestep linear projection."""

    def __init__(self, input_size, hidden_size, output_size=1, num_layers=2):
        """
        :param input_size: number of input features per time step
        :param hidden_size: number of hidden units per LSTM layer
        :param output_size: number of output features per time step
        :param num_layers: number of stacked LSTM layers
        """
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)  # sequence encoder
        self.fc = nn.Linear(hidden_size, output_size)             # hidden -> output projection

    def forward(self, _x):
        """Project every LSTM time step; input/output are (seq, batch, feat)."""
        # nn.LSTM returns (output, (h_n, c_n)); only the full output sequence is needed.
        seq_out, _ = self.lstm(_x)
        seq_len, batch, hidden = seq_out.shape
        # Flatten (seq, batch) so the linear layer maps all time steps at once,
        # then restore the sequence/batch layout.
        projected = self.fc(seq_out.view(seq_len * batch, hidden))
        return projected.view(seq_len, batch, -1)


class GAN(nn.Module):
    """Generator/discriminator pair for a generative adversarial network."""

    def __init__(self, in_features, out_features):
        """
        :param in_features: dimensionality of the generator's noise input
        :param out_features: dimensionality of generated / real samples
        """
        super(GAN, self).__init__()
        # LeakyReLU is used because GAN losses are prone to vanishing gradients.
        self.discriminator = nn.Sequential(
            nn.Linear(out_features, 128),
            nn.LeakyReLU(0.1),
            nn.Linear(128, 1),
            nn.Sigmoid(),  # probability that the sample is real
        )
        self.generator = nn.Sequential(
            nn.Linear(in_features, 256),
            # nn.BatchNorm1d(256),  # left disabled by the original author
            nn.LeakyReLU(0.1),
            nn.Linear(256, out_features),
            nn.Tanh(),  # squash generated samples into [-1, 1]
        )

    def disc_forward(self, data):
        """Score ``data`` with the discriminator (probability in [0, 1])."""
        return self.discriminator(data)

    def gen_forward(self, data):
        """Generate samples from noise ``data``."""
        return self.generator(data)

class SelectItem(nn.Module):
    """Select one element from a tuple/sequence during a forward pass.

    nn.LSTM returns a tuple, which cannot flow directly through
    nn.Sequential; inserting ``SelectItem(0)`` after an LSTM extracts the
    output tensor so the whole pipeline can live inside one Sequential
    (keeping all weights adjustable together).
    """

    def __init__(self, item_index):
        """
        :param item_index: index extracted from the incoming sequence
        """
        super(SelectItem, self).__init__()
        self._name = 'selectitem'  # informal identifier kept from the original
        self.item_index = item_index

    def forward(self, inputs):
        """Return ``inputs[self.item_index]``."""
        return inputs[self.item_index]


class Reshape(nn.Module):
    """Reshape tensors inside an nn.Sequential pipeline.

    The target shape is passed as constructor varargs, e.g. ``Reshape(-1, 64)``.
    """

    def __init__(self, *args):
        super(Reshape, self).__init__()
        self.shape = args  # target shape tuple forwarded to Tensor.view

    def forward(self, x):
        """Return a view of ``x`` with the configured shape."""
        return x.view(self.shape)

class LSTM_GAN(nn.Module):
    """GAN whose generator and discriminator both contain an LSTM stage.

    ``LSTM_SIZE`` is a 3-element sequence: [lstm_input_size,
    lstm_hidden_size, lstm_num_layers]. Both networks take inputs of
    width 4096 per time step, shaped (seq_len, batch, 4096).
    """

    def __init__(self, LSTM_SIZE):
        """
        :param LSTM_SIZE: [input_size, hidden_size, num_layers] for both LSTMs
        """
        super(LSTM_GAN, self).__init__()
        self.discriminator = nn.Sequential(
            nn.Linear(4096, LSTM_SIZE[0]),
            nn.LeakyReLU(0.1),
            nn.LSTM(input_size=LSTM_SIZE[0], hidden_size=LSTM_SIZE[1], num_layers=LSTM_SIZE[2]),
            SelectItem(0),  # keep only the LSTM output sequence, drop (h_n, c_n)
            nn.Linear(LSTM_SIZE[1], 512),
            nn.Sigmoid(),
            nn.Linear(512, 1),
            nn.Sigmoid(),  # real/fake probability per time step
        )
        self.generator = nn.Sequential(
            nn.Linear(4096, LSTM_SIZE[0]),
            nn.LeakyReLU(0.1),
            nn.LSTM(input_size=LSTM_SIZE[0], hidden_size=LSTM_SIZE[1], num_layers=LSTM_SIZE[2]),
            SelectItem(0),  # keep only the LSTM output sequence
            nn.Linear(LSTM_SIZE[1], 4096),
        )

    def disc_forward(self, data):
        """Score ``data`` with the discriminator."""
        return self.discriminator(data)

    def gen_forward(self, data):
        """Transform ``data`` with the generator."""
        return self.generator(data)



if __name__ == "__main__":
    pass  # module is meant to be imported for its model classes; no CLI/self-test