﻿from math import log

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicBlock(nn.Module):
    """Residual block whose main branch is a depthwise-separable convolution.

    Args:
        in_planes: number of input channels.
        planes: number of output channels.
        conv_type: 1 for dilated separable conv ("DilConv", dilation=2),
            2 for plain separable conv ("SepConv").
        kernel_size: depthwise kernel size, 3 or 5.
        stride: stride of the depthwise convolution (default 1).

    Raises:
        ValueError: if conv_type or kernel_size is unsupported. (Previously an
            invalid value only surfaced later as an AttributeError on
            self.Conv / self.padding.)
    """
    def __init__(self, in_planes, planes, conv_type, kernel_size, stride=1):
        super(BasicBlock, self).__init__()
        self.expansion = 1
        if kernel_size not in (3, 5):
            raise ValueError(f"unsupported kernel_size: {kernel_size!r} (expected 3 or 5)")
        if conv_type == 1:
            # With dilation=2 the effective kernel is 2k-1, so 'same' padding
            # is (kernel_size - 1): 2 for k=3, 4 for k=5.
            self.padding = 2 if kernel_size == 3 else 4
            self.Conv = nn.Sequential(          # DilConv
                nn.ReLU(inplace=False),  # note: stride is not necessarily 1 here
                nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, stride=stride,
                          padding=self.padding, dilation=2,
                          groups=in_planes, bias=False),
                nn.Conv2d(in_planes, planes, kernel_size=1, padding=0, bias=False),
                nn.BatchNorm2d(planes, affine=False),
            )
        elif conv_type == 2:
            # Plain separable conv: 'same' padding is (kernel_size - 1) // 2.
            self.padding = 1 if kernel_size == 3 else 2
            self.Conv = nn.Sequential(         # SepConv
                nn.ReLU(inplace=False),
                nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, stride=stride,
                          padding=self.padding, groups=in_planes, bias=False),
                nn.Conv2d(in_planes, planes, kernel_size=1, padding=0, bias=False),
                nn.BatchNorm2d(planes, affine=False),
            )
        else:
            raise ValueError(f"unsupported conv_type: {conv_type!r} (expected 1 or 2)")
        # 1x1 projection shortcut when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.ReLU(inplace=False),
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        """Return Conv(x) + shortcut(x); no activation after the sum."""
        # Out-of-place add (instead of +=) avoids in-place mutation of the
        # BatchNorm output, which is safer for autograd.
        out = self.Conv(x) + self.shortcut(x)
        return out

class BasicUnit(nn.Module):
    """A stack of `amount` BasicBlocks mapping in_channel -> out_channel.

    The first block changes the channel count; subsequent blocks keep it.
    All blocks use stride 1, so the spatial size is preserved.
    """

    def __init__(self, in_channel, out_channel, amount, conv_type, kernel_size):
        super(BasicUnit, self).__init__()
        self.type = 1  # unit-type tag: 1 = convolutional (basic) unit
        self.inchannel = in_channel
        self.conv_type_ = conv_type
        self.kernel_size_ = kernel_size
        # Only the output channel count drives the blocks that follow.
        self.layer = self.make_layer(BasicBlock, out_channel, amount, conv_type, kernel_size, stride=1)

    def make_layer(self, block, channels, num_blocks, _conv_type_, _kernel_size_, stride):
        """Build num_blocks blocks; the first uses `stride`, the rest stride 1.

        Note: updates self.inchannel as a side effect so the blocks chain.
        """
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.inchannel, channels, _conv_type_, _kernel_size_, s))
            self.inchannel = channels
        return nn.Sequential(*blocks)

    def forward(self, x):
        """Apply the block stack, e.g. (N, 3, 32, 32) -> (N, out_channel, 32, 32)."""
        return self.layer(x)

class PoolUnit(nn.Module):
    """Downsampling unit: 3x3 pooling with stride 2 (halves the feature map).

    Args:
        pool_type: 1 for average pooling, 2 for max pooling.

    Raises:
        ValueError: for any other pool_type. (Previously an invalid value only
            surfaced at forward time as an AttributeError on select_pooltype.)
    """
    def __init__(self, pool_type):
        super(PoolUnit, self).__init__()
        self.type = 2    # unit-type tag: 2 = pooling unit
        self.pool_type = pool_type

        if self.pool_type == 1:
            self.select_pooltype = nn.AvgPool2d(3, stride=2, padding=1)
        elif self.pool_type == 2:
            self.select_pooltype = nn.MaxPool2d(3, stride=2, padding=1)
        else:
            raise ValueError(f"unsupported pool_type: {pool_type!r} (expected 1 or 2)")

    def forward(self, x):
        return self.select_pooltype(x)

# class AttentionUnit(nn.Module):
#     """Constructs a AU module.
#
#     Args:
#         channel: Number of channels of the input feature map
#         k_size: Adaptive selection of kernel size
#     """
#     def __init__(self, channel , kernel_type):
#         super(AttentionUnit, self).__init__()
#         self.type = 3
#         # select dilated convolution kernel size
#         if kernel_type == 0:
#             # self.dilatedConv = nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=2,
#             #                           dilation=2, groups=channel, bias=False),
#             self.dilatedConv = nn.Sequential(         #SepConv
#                 nn.ReLU(inplace=False),
#                 # nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=1, bias=False),
#                 nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=2,
#                           dilation=2, groups=channel, bias=False),
#                 nn.BatchNorm2d(channel, affine=False),
#             )
#         elif kernel_type == 1:
#             # self.dilatedConv = nn.Conv2d(channel, channel, kernel_size=5, stride=1, padding=4,
#             #                      dilation=2, groups=channel, bias=False)
#             self.dilatedConv = nn.Sequential(         #SepConv
#                 nn.ReLU(inplace=False),
#                 # nn.Conv2d(channel, channel, kernel_size=5, stride=1, padding=2,  bias=False),
#                 nn.Conv2d(channel, channel, kernel_size=5, stride=1, padding=4,
#                                                dilation=2, groups=channel, bias=False),
#                 nn.BatchNorm2d(channel, affine=False),
#             )
#         self.avg_pool = nn.AdaptiveAvgPool2d(1)
#         self.max_pool = nn.AdaptiveMaxPool2d(1)
#         self.fc = nn.Sequential(
#             nn.Conv2d(channel, channel//16,1, bias=False),
#             nn.ReLU(),
#             nn.Conv2d(channel//16, channel ,1, bias=False)
#         )
#
#         self.conv2 = nn.Conv2d(2, 1, 7, padding=7//2, bias=False)
#
#         self.sigmoid = nn.Sigmoid()
#
#     def forward(self, x):
#
#         # x: input features with shape [b, c, h, w]
#         b, c, h, w = x.size()
#         x = self.dilatedConv(x)
#         # feature descriptor on the global spatial information
#         y1 = self.fc(self.avg_pool(x))
#         y2 = self.fc(self.max_pool(x))
#         y3 = y1 + y2
#         # Multi-scale information fusion
#         y3 = self.sigmoid(y3)
#         # out1 = x * y3.expand_as(x)
#         out1 = x * y3
#         z1 = torch.mean(out1 , dim=1, keepdim=True)
#         z2 , _ = torch.max(out1, dim=1, keepdim=True)
#         z3 = torch.cat([z1, z2], dim=1)
#         z4 = self.sigmoid(self.conv2(z3))
#         # out2 = out1 * z4.expand_as(out1)
#         out2 = out1 * z4
#         return out2

#---------------------------------------------------Consecutive_inovation----------------------------------------------
# class AttentionUnit(nn.Module):
#     """Constructs a AU module.
#
#     Args:
#         channel: Number of channels of the input feature map
#         k_size: Adaptive selection of kernel size
#     """
#     def __init__(self, channel , kernel_type):
#         super(AttentionUnit, self).__init__()
#         self.type = 3
#
#         # select dilated convolution kernel size
#         if kernel_type == 0:
#             # self.dilatedConv = nn.Conv2d(channel, channel, kernel_size = 3, stride=1, padding=2,
#             #                      dilation=2, groups=channel, bias=False)
#             self.dilatedConv = nn.Sequential(  # SepConv
#                 nn.ReLU(inplace=False),
#                 nn.Conv2d(channel, channel, kernel_size = 3, stride=1, padding=2,
#                                  dilation=2, groups=channel, bias=False),
#                 nn.BatchNorm2d(channel, affine=False),
#             )
#         elif kernel_type == 1:
#             # self.dilatedConv = nn.Conv2d(channel, channel, kernel_size=5, stride=1, padding=4,
#             #                      dilation=2, groups=channel, bias=False)
#             self.dilatedConv = nn.Sequential(  # SepConv
#                 nn.ReLU(inplace=False),
#                 nn.Conv2d(channel, channel, kernel_size=5, stride=1, padding=4,
#                                  dilation=2, groups=channel, bias=False),
#                 nn.BatchNorm2d(channel, affine=False),
#             )
#
#         adapt_k = int(abs(((log(channel,2)/2)+0.5)))
#         # adapt_k = abs(((log(channel,2)/2)+0.5))
#         self.k_size = adapt_k - 1 if adapt_k % 2 == 0 else adapt_k
#         self.avg_pool = nn.AdaptiveAvgPool2d(1)
#         self.conv = nn.Conv1d(1, 1, kernel_size = self.k_size, padding=(self.k_size - 1) // 2, bias=False)
#         self.sigmoid = nn.Sigmoid()
#
#     def forward(self, x):
#         # x: input features with shape [b, c, h, w]
#         b, c, h, w = x.size()
#         x = self.dilatedConv(x)
#         # feature descriptor on the global spatial information
#         y = self.avg_pool(x)
#
#         # Two different branches of ECA module
#         y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
#
#         # Multi-scale information fusion
#         y = self.sigmoid(y)
#
#         return x * y.expand_as(x)
#---------------------------------------------------Consecutive_inovation----------------------------------------------

#---------------------------------------------------Consecutive_inovation----------------------------------------------
# class AttentionUnit(nn.Module):
#     """Constructs a AU module.
#
#     Args:
#         channel: Number of channels of the input feature map
#         k_size: Adaptive selection of kernel size
#     """
#     def __init__(self, channel , kernel_type):
#         super(AttentionUnit, self).__init__()
#         self.type = 3
#         #select dilated convolution kernel size
#         if kernel_type == 0:
#             self.dilatedConv = nn.Sequential(         #SepConv
#                         nn.ReLU(inplace=False),
#                         nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=2,
#                                   dilation=2, groups=channel, bias=False),
#                         nn.BatchNorm2d(channel, affine=False), )
#         elif kernel_type == 1:
#             self.dilatedConv = nn.Sequential(         #SepConv
#                         nn.ReLU(inplace=False),
#                         nn.Conv2d(channel, channel, kernel_size=5, stride=1, padding=4,
#                                                        dilation=2, groups=channel, bias=False),
#                         nn.BatchNorm2d(channel, affine=False), )
#
#         adapt_k = int(abs(((log(channel,2)/2)+0.5)))
#
#         self.k_size = adapt_k - 1 if adapt_k % 2 == 0 else adapt_k
#         self.avg_pool = nn.AdaptiveAvgPool2d(1)
#         self.conv1 = nn.Conv1d(1, 1, kernel_size = self.k_size, padding=(self.k_size - 1) // 2, bias=False)
#
#         self.max_pool = nn.AdaptiveMaxPool2d(1)
#         self.conv2 = nn.Conv2d(2, 1, 7, padding=7//2, bias=False)
#
#         self.sigmoid = nn.Sigmoid()
#
#     def forward(self, x):
#         # x: input features with shape [b, c, h, w]
#         b, c, h, w = x.size()
#         x = self.dilatedConv(x)
#         # feature descriptor on the global spatial information
#         y1 = self.avg_pool(x)
#         # y2 = self.max_pool(x)
#         # Two different branches of ECA module
#         y1 = self.conv1(y1.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
#         # y2 = self.conv1(y2.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
#
#         # Multi-scale information fusion
#         # y = self.sigmoid(y1 + y2)
#         y = self.sigmoid(y1)
#         out = x * y.expand_as(x)
#
#         # out = x * y
#
#         # z1 = torch.mean(out , dim=1, keepdim=True)
#         # z2 , _ = torch.max(out, dim=1, keepdim=True)
#         # z3 = torch.cat([z1, z2], dim=1)
#         # z4 = self.sigmoid(self.conv2(z3))
#         # out = out * z4
#         return out
#---------------------------------------------------Consecutive_inovation----------------------------------------------

#--------------------------------------------------------BAM-----------------------------------------------------------
# class Flatten(nn.Module):
#     def forward(self, x):
#         return x.view(x.size(0), -1)
# class ChannelGate(nn.Module):
#     def __init__(self, gate_channel, reduction_ratio=16, num_layers=1):
#         super(ChannelGate, self).__init__()
#         # self.gate_activation = gate_activation
#         self.gate_c = nn.Sequential()
#         self.gate_c.add_module( 'flatten', Flatten() )
#         gate_channels = [gate_channel]
#         gate_channels += [gate_channel // reduction_ratio] * num_layers
#         gate_channels += [gate_channel]
#         for i in range( len(gate_channels) - 2 ):
#             self.gate_c.add_module( 'gate_c_fc_%d'%i, nn.Linear(gate_channels[i], gate_channels[i+1]) )
#             self.gate_c.add_module( 'gate_c_bn_%d'%(i+1), nn.BatchNorm1d(gate_channels[i+1]) )
#             self.gate_c.add_module( 'gate_c_relu_%d'%(i+1), nn.ReLU() )
#         self.gate_c.add_module( 'gate_c_fc_final', nn.Linear(gate_channels[-2], gate_channels[-1]) )
#     def forward(self, in_tensor):
#         avg_pool = F.avg_pool2d( in_tensor, in_tensor.size(2), stride=in_tensor.size(2) )
#         return self.gate_c( avg_pool ).unsqueeze(2).unsqueeze(3).expand_as(in_tensor)
#
# class SpatialGate(nn.Module):
#     def __init__(self, gate_channel, reduction_ratio=16, dilation_conv_num=2, dilation_val=4):
#         super(SpatialGate, self).__init__()
#         self.gate_s = nn.Sequential()
#         self.gate_s.add_module( 'gate_s_conv_reduce0', nn.Conv2d(gate_channel, gate_channel//reduction_ratio, kernel_size=1))
#         self.gate_s.add_module( 'gate_s_bn_reduce0',	nn.BatchNorm2d(gate_channel//reduction_ratio) )
#         self.gate_s.add_module( 'gate_s_relu_reduce0',nn.ReLU() )
#         for i in range( dilation_conv_num ):
#             self.gate_s.add_module( 'gate_s_conv_di_%d'%i, nn.Conv2d(gate_channel//reduction_ratio, gate_channel//reduction_ratio, kernel_size=3, \
# 						padding=dilation_val, dilation=dilation_val) )
#             self.gate_s.add_module( 'gate_s_bn_di_%d'%i, nn.BatchNorm2d(gate_channel//reduction_ratio) )
#             self.gate_s.add_module( 'gate_s_relu_di_%d'%i, nn.ReLU() )
#         self.gate_s.add_module( 'gate_s_conv_final', nn.Conv2d(gate_channel//reduction_ratio, 1, kernel_size=1) )
#     def forward(self, in_tensor):
#         return self.gate_s( in_tensor ).expand_as(in_tensor)
#
# class AttentionUnit(nn.Module):
#     def __init__(self, gate_channels, kernel_type):
#         super(AttentionUnit, self).__init__()
#         if kernel_type == 0:
#             self.dilatedConv = nn.Sequential(
#                                 nn.ReLU(inplace=False),
#                                 nn.Conv2d(gate_channels, gate_channels, kernel_size=3, stride=1, padding=2,
#                                           dilation=2, groups=gate_channels, bias=False),
#                                 nn.BatchNorm2d(gate_channels, affine=False), )
#         elif kernel_type == 1:
#                     self.dilatedConv = nn.Sequential(
#                                 nn.ReLU(inplace=False),
#                                 nn.Conv2d(gate_channels, gate_channels, kernel_size=5, stride=1, padding=4,
#                                                                dilation=2, groups=gate_channels, bias=False),
#                                 nn.BatchNorm2d(gate_channels, affine=False), )
#         self.channel_att = ChannelGate(gate_channels)
#         self.spatial_att = SpatialGate(gate_channels)
#     def forward(self,in_tensor):
#         in_tensor = self.dilatedConv(in_tensor)
#         att = 1 + F.sigmoid( self.channel_att(in_tensor) * self.spatial_att(in_tensor) )
#         return att * in_tensor
#--------------------------------------------------------BAM-----------------------------------------------------------

#-------------------------------------------------------------------------CBAM----------------------------------------
class AttentionUnit(nn.Module):
    """CBAM attention unit preceded by a dilated depthwise convolution.

    Args:
        gate_channels: number of channels of the input feature map.
        kernel_type: 0 -> 3x3 dilated depthwise conv, 1 -> 5x5.
        reduction_ratio: channel-MLP reduction ratio for ChannelGate.
        pool_types: pooling branches for ChannelGate (default ['avg', 'max']).
        no_spatial: if True, skip the SpatialGate.

    Raises:
        ValueError: if kernel_type is not 0 or 1. (Previously an invalid value
            left self.dilatedConv undefined until forward time.)
    """
    def __init__(self, gate_channels, kernel_type, reduction_ratio=16, pool_types=None, no_spatial=False):
        super(AttentionUnit, self).__init__()
        if pool_types is None:  # avoid a shared mutable default argument
            pool_types = ['avg', 'max']
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.no_spatial = no_spatial
        # Select the dilated depthwise convolution kernel size.
        if kernel_type == 0:
            self.dilatedConv = nn.Sequential(
                nn.ReLU(inplace=False),
                nn.Conv2d(gate_channels, gate_channels, kernel_size=3, stride=1, padding=2,
                          dilation=2, groups=gate_channels, bias=False),
                nn.BatchNorm2d(gate_channels, affine=False),
            )
        elif kernel_type == 1:
            self.dilatedConv = nn.Sequential(
                nn.ReLU(inplace=False),
                nn.Conv2d(gate_channels, gate_channels, kernel_size=5, stride=1, padding=4,
                          dilation=2, groups=gate_channels, bias=False),
                nn.BatchNorm2d(gate_channels, affine=False),
            )
        else:
            raise ValueError(f"unsupported kernel_type: {kernel_type!r} (expected 0 or 1)")
        if not no_spatial:
            self.SpatialGate = SpatialGate()

    def forward(self, x):
        """Dilated depthwise conv, then channel attention, then (optional) spatial attention."""
        x = self.dilatedConv(x)
        x_out = self.ChannelGate(x)
        if not self.no_spatial:
            x_out = self.SpatialGate(x_out)
        return x_out

class BasicConv(nn.Module):
    """Conv2d optionally followed by BatchNorm2d and ReLU (CBAM helper)."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=bias)
        # Optional post-conv layers; None disables the stage in forward().
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        for stage in (self.bn, self.relu):
            if stage is not None:
                out = stage(out)
        return out

class Flatten(nn.Module):
    """Flatten every dimension after the batch one: (N, ...) -> (N, prod(...))."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)

class ChannelGate(nn.Module):
    """CBAM channel-attention gate: pooled descriptors -> shared MLP -> sigmoid scale.

    Args:
        gate_channels: number of input channels.
        reduction_ratio: hidden-layer reduction factor of the shared MLP.
        pool_types: any subset of 'avg', 'max', 'lp', 'lse'
            (default ['avg', 'max']).
    """
    def __init__(self, gate_channels, reduction_ratio=16, pool_types=None):
        super(ChannelGate, self).__init__()
        if pool_types is None:  # avoid a shared mutable default argument
            pool_types = ['avg', 'max']
        self.gate_channels = gate_channels
        # MLP shared across all pooling branches.
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
            )
        self.pool_types = pool_types

    def forward(self, x):
        """Return x scaled per-channel by the sigmoid of the summed MLP outputs."""
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                pooled = F.avg_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
            elif pool_type == 'max':
                pooled = F.max_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
            elif pool_type == 'lp':
                pooled = F.lp_pool2d(x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
            elif pool_type == 'lse':
                # Log-sum-exp pooling only.
                pooled = logsumexp_2d(x)
            else:
                # Previously an unknown pool_type left channel_att_raw unbound
                # (UnboundLocalError) or silently reused the previous branch.
                raise ValueError(f"unsupported pool_type: {pool_type!r}")
            channel_att_raw = self.mlp(pooled)

            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw

        # torch.sigmoid replaces the long-deprecated F.sigmoid.
        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale

def logsumexp_2d(tensor):
    """Numerically stable log-sum-exp over the spatial dims.

    Input (N, C, H, W) -> output (N, C, 1).
    """
    flat = tensor.view(tensor.size(0), tensor.size(1), -1)
    # Subtract the per-row maximum before exponentiating to avoid overflow.
    peak = flat.max(dim=2, keepdim=True)[0]
    return peak + (flat - peak).exp().sum(dim=2, keepdim=True).log()

class ChannelPool(nn.Module):
    """Compress the channel dimension into two maps: channel-wise max and mean."""

    def forward(self, x):
        max_map = torch.max(x, 1)[0].unsqueeze(1)
        mean_map = torch.mean(x, 1).unsqueeze(1)
        return torch.cat((max_map, mean_map), dim=1)

class SpatialGate(nn.Module):
    """CBAM spatial-attention gate: 2-channel pooled map -> 7x7 conv -> sigmoid scale."""

    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        # 2 input maps (channel max + mean) -> 1 attention map; BN but no ReLU.
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)

    def forward(self, x):
        x_compress = self.compress(x)
        x_out = self.spatial(x_compress)
        # torch.sigmoid replaces the long-deprecated F.sigmoid.
        scale = torch.sigmoid(x_out)  # broadcasting over channels
        return x * scale
#-------------------------------------------------------------------------CBAM----------------------------------------

class Individual(object):
    """One candidate architecture in the evolutionary search.

    An individual is encoded as a dict describing an ordered sequence of
    units (1 = BasicUnit, 2 = PoolUnit, 3 = AttentionUnit) together with the
    per-position hyper-parameters needed to instantiate them.
    """

    def __init__(self, params):
        self.params = params

        # BasicUnit (convolutional) search ranges.
        self.BasicBlock_min_limit = params['BasicBlock_min_limit']
        self.BasicBlock_max_limit = params['BasicBlock_max_limit']
        self.BU_MinOutChannels_list = params['BU_MinOutChannels_list']
        self.BU_MaxOutChannels_list = params['BU_MaxOutChannels_list']
        self.BU_conv_type_list = params['BU_conv_type_list']
        self.BU_kernel_size_list = params['BU_kernel_size_list']

        self.BU_minlimit = params['BU_minlimit']
        self.BU_maxlimit = params['BU_maxlimit']

        # Pool-unit count range.
        self.PU_minlimit = params['PU_minlimit']
        self.PU_maxlimit = params['PU_maxlimit']

        # Attention-unit count range and kernel choices.
        self.AU_minlimit = params['AU_minlimit']
        self.AU_maxlimit = params['AU_maxlimit']
        self.AU_conv_type_list = params['AU_conv_type_list']

        self.init_inchannel = params['init_inchannels']
        self.init_size = params['init_size']

        # Maximum number of units an individual may hold (array pre-allocation).
        self.init_max_len = params['init_max_len']

    def initialize_individual(self):
        """Randomly generate one individual's encoding dict.

        Returns a dict holding per-position hyper-parameters, the instantiated
        encode_layers modules, the unit ordering, and bookkeeping fields
        ('acc' fitness placeholder, per-position feature-map 'size').
        """
        # Decide how many units of each kind this individual gets.
        individual = {}
        BU_randnum = np.random.randint(self.BU_minlimit, self.BU_maxlimit + 1)
        PU_randnum = np.random.randint(self.PU_minlimit, self.PU_maxlimit + 1)
        AU_randnum = np.random.randint(self.AU_minlimit, self.AU_maxlimit + 1)
        # Lay out the unit-type sequence, then shuffle it.
        individual_len = BU_randnum + PU_randnum + AU_randnum
        order_type = [None for _ in range(individual_len)]
        if BU_randnum > 0: order_type[0 : BU_randnum] = [1] * BU_randnum
        if PU_randnum > 0: order_type[BU_randnum : BU_randnum + PU_randnum] = [2] * PU_randnum
        if AU_randnum > 0: order_type[BU_randnum + PU_randnum:BU_randnum + PU_randnum + AU_randnum] = [3] * AU_randnum
        for _ in range(10):
            np.random.shuffle(order_type)
        # Forbid the first unit being a pool (2) or attention (3) unit.
        # BUGFIX: the old check only rejected pool units (== 2) despite the
        # stated intent of forbidding both.
        # NOTE(review): assumes BU_minlimit >= 1 so a valid ordering exists;
        # otherwise this loop cannot terminate — confirm against the config.
        while order_type[0] in (2, 3):
            np.random.shuffle(order_type)

        # Pre-allocated per-position hyper-parameter tracks; position i+1 is
        # filled while position i is being materialised.
        size = [None for _ in range(self.init_max_len)]
        pool_type = [None for _ in range(self.init_max_len)]
        inchannels = [None for _ in range(self.init_max_len)]
        outchannels = [None for _ in range(self.init_max_len)]
        BasicBlock_amount = [None for _ in range(self.init_max_len)]
        BU_outchannels = [None for _ in range(self.init_max_len)]
        BU_conv_type = [None for _ in range(self.init_max_len)]
        BU_kernel_size = [None for _ in range(self.init_max_len)]
        AU_conv_type = [None for _ in range(self.init_max_len)]

        BasicBlock_amount[0] = np.random.randint(self.BasicBlock_min_limit, self.BasicBlock_max_limit + 1)
        BU_outchannels[0] = np.random.randint(self.BU_MinOutChannels_list, self.BU_MaxOutChannels_list + 1)
        BU_conv_type[0] = self.BU_conv_type_list[np.random.randint(0, len(self.BU_conv_type_list))]
        BU_kernel_size[0] = self.BU_kernel_size_list[np.random.randint(0, len(self.BU_kernel_size_list))]

        AU_conv_type[0] = self.AU_conv_type_list[np.random.randint(0, len(self.AU_conv_type_list))]

        size[0] = self.init_size  # initial spatial size (e.g. 32 for CIFAR)
        # 1 means average pooling, 2 means max pooling (see PoolUnit).
        pool_type[0] = np.random.randint(1, 3)
        inchannels[0] = self.init_inchannel
        outchannels[0] = BU_outchannels[0]  # initial output channels drawn from the BU channel range

        encode_layers = []  # store per-layer instantiated modules
        i = 0               # tracks channels, pool_type and size per position
        for unit_type in order_type:
            if unit_type == 1:
                # Convolutional unit: consumes the sampled BU hyper-parameters,
                # then pre-samples fresh ones for the next position.
                outchannels[i] = BU_outchannels[i]
                encode_layers.append(BasicUnit(int(inchannels[i]), int(outchannels[i]), int(BasicBlock_amount[i]), int(BU_conv_type[i]), int(BU_kernel_size[i])))
                i = i + 1
                BasicBlock_amount[i] = np.random.randint(self.BasicBlock_min_limit, self.BasicBlock_max_limit + 1)
                BU_outchannels[i] = np.random.randint(self.BU_MinOutChannels_list, self.BU_MaxOutChannels_list + 1)
                BU_conv_type[i] = self.BU_conv_type_list[np.random.randint(0, len(self.BU_conv_type_list))]
                BU_kernel_size[i] = self.BU_kernel_size_list[np.random.randint(0, len(self.BU_kernel_size_list))]

                AU_conv_type[i] = AU_conv_type[i - 1]

                pool_type[i] = pool_type[i - 1]
                size[i] = size[i - 1]
                inchannels[i] = outchannels[i - 1]
                outchannels[i] = inchannels[i]
            elif unit_type == 2:
                # Pooling unit: halves the feature map; re-samples the pool
                # type for the next position, carries everything else over.
                encode_layers.append(PoolUnit(int(pool_type[i])))
                i = i + 1
                pool_type[i] = np.random.randint(1, 3)
                size[i] = int(size[i - 1] / 2)

                BasicBlock_amount[i] = BasicBlock_amount[i - 1]
                BU_outchannels[i] = BU_outchannels[i - 1]
                BU_conv_type[i] = BU_conv_type[i - 1]
                BU_kernel_size[i] = BU_kernel_size[i - 1]

                AU_conv_type[i] = AU_conv_type[i - 1]

                inchannels[i] = outchannels[i - 1]
                outchannels[i] = inchannels[i]
            elif unit_type == 3:
                # Attention unit: channel-preserving; re-samples the AU kernel
                # type for the next position, carries everything else over.
                outchannels[i] = inchannels[i]
                encode_layers.append(AttentionUnit(int(inchannels[i]), int(AU_conv_type[i])))
                i = i + 1
                AU_conv_type[i] = self.AU_conv_type_list[np.random.randint(0, len(self.AU_conv_type_list))]

                BasicBlock_amount[i] = BasicBlock_amount[i - 1]
                BU_outchannels[i] = BU_outchannels[i - 1]
                BU_conv_type[i] = BU_conv_type[i - 1]
                BU_kernel_size[i] = BU_kernel_size[i - 1]

                pool_type[i] = pool_type[i - 1]
                size[i] = size[i - 1]
                inchannels[i] = outchannels[i - 1]
                outchannels[i] = inchannels[i]

        # Trim the pre-allocated tracks to the actual individual length.
        individual['inchannels'] = inchannels[0:len(order_type)]
        individual['outchannels'] = outchannels[0:len(order_type)]
        individual['pool_type'] = pool_type[0:len(order_type)]
        individual['BasicBlock_amount'] = BasicBlock_amount[0:len(order_type)]
        individual['BU_outchannels'] = BU_outchannels[0:len(order_type)]
        individual['BU_conv_type'] = BU_conv_type[0:len(order_type)]
        individual['BU_kernel_size'] = BU_kernel_size[0:len(order_type)]

        individual['AU_conv_type'] = AU_conv_type[0:len(order_type)]

        individual['acc'] = -1.0  # fitness placeholder, set after evaluation
        individual['size'] = size  # used by the fully-connected layer
        individual['encode_layers'] = encode_layers
        individual['order_type'] = order_type
        return individual

    def update_offspring_information(self, offsprings):
        """Rebuild encode_layers and the size list for each offspring dict.

        Called after crossover/mutation so the instantiated modules match the
        (possibly modified) encoding. Mutates the offspring dicts in place and
        returns the same list.
        """
        for off in offsprings:
            order_type = off['order_type']
            inchannels = off['inchannels']
            outchannels = off['outchannels']
            pool_type = off['pool_type']
            BasicBlock_amount = off['BasicBlock_amount']
            BU_outchannels = off['BU_outchannels']
            BU_conv_type = off['BU_conv_type']
            BU_kernel_size = off['BU_kernel_size']

            AU_conv_type = off['AU_conv_type']

            encode_layers = []
            size = [None for _ in range(self.init_max_len)]
            size[0] = self.init_size
            i = 0  # tracks the current position / spatial size
            for unit_type in order_type:
                if unit_type == 1:
                    encode_layers.append(BasicUnit(int(inchannels[i]), int(outchannels[i]), int(BasicBlock_amount[i]), int(BU_conv_type[i]), int(BU_kernel_size[i])))
                    i = i + 1
                    size[i] = size[i - 1]
                elif unit_type == 2:
                    encode_layers.append(PoolUnit(int(pool_type[i])))
                    i = i + 1
                    size[i] = int(size[i - 1] / 2)  # pooling halves the feature map
                elif unit_type == 3:
                    encode_layers.append(AttentionUnit(int(inchannels[i]), int(AU_conv_type[i])))
                    i = i + 1
                    size[i] = size[i - 1]

            off['encode_layers'] = encode_layers
            off['size'] = size
        return offsprings

class Population(object):
    """A fixed-size collection of Individual encoding dicts."""

    def __init__(self, params):
        # pop_size is the number of individuals kept in the population.
        self.params = params
        self.pop_size = params['pop_size']
        self.individuals = []

    def initialize_individuals(self):
        """Create pop_size random individuals, store and return them."""
        factory = Individual(self.params)
        for _ in range(self.pop_size):
            # Each call produces one encoding dict appended to the population.
            self.individuals.append(factory.initialize_individual())
        return self.individuals

    def create_from_offspring(self, offsprings):
        """Re-materialise encode_layers/size for each offspring after variation."""
        helper = Individual(self.params)
        return helper.update_offspring_information(offsprings)



