# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset,DataLoader

import utils
import  math
import numpy as np
# import data_loader
import argparse
from sklearn import preprocessing
from GlobalNet import GlobalNet
import scipy.io as sio
import os
import time
from sklearn.metrics import confusion_matrix
from torch.utils.data.sampler import SubsetRandomSampler
import copy
import matplotlib.pyplot as plt

class Matcifar(Dataset):
    def __init__(self, datasets):
        self.x_data = datasets['data'].transpose(3, 2, 1, 0)
        self.y_data = datasets['Labels']
        self.len = len(datasets['Labels'])

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len

class Classification(object):
    """Train and evaluate an evolved CNN on the Indian Pines HSI scene.

    On construction the Indian Pines cube is loaded, min-max normalised,
    zero-padded, and cut into 32x32xnBand patches that are split per class
    into train / valid / test dictionaries (``imdb_*``).  ``test_end``
    retrains the best evolved individual from scratch, evaluates it on the
    test split, and renders a colour classification map.
    """

    def __init__(self, params):
        """Load the scene and build the per-class train/valid/test splits.

        Args:
            params: dict with at least 'EPOCH', 'BATCH_SIZE', 'num_class'.
        """
        # Prefer the second GPU when CUDA is available, otherwise run on CPU.
        self.device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

        self.Best_acc = -1.0
        self.pre_epoch = 0              # epoch to resume training from
        self.params = params

        self.cutout = False             # TODO: move these augmentation knobs into params
        self.cutout_length = 2
        self.num_cut = 10
        self.EPOCH = params['EPOCH']
        self.BATCH_SIZE = params['BATCH_SIZE']
        self.num_class = params['num_class']

        # Per class, this fraction goes to valid+test; the rest is train.
        self.proptionTest = 0.95

        # Pre-allocation size for the train / valid buffers (trimmed later).
        self.cache_placeholder = 1000
        # Cross-entropy loss for the multi-class classification task.
        self.criterion = nn.CrossEntropyLoss()
        self.grad_clip = 5              # max gradient norm during training
        self.lr = 0.01
        self.weight_decay = 3e-4

        image_file = 'C:/Users/Administrator/PycharmProjects/LSY/Design_YG/IN/Indian_pines_corrected.mat'
        label_file = 'C:/Users/Administrator/PycharmProjects/LSY/Design_YG/IN/Indian_pines_gt.mat'
        image = sio.loadmat(image_file)
        Indian = image['indian_pines_corrected']    # (145, 145, 200) ndarray
        label = sio.loadmat(label_file)
        GroundTruth = label['indian_pines_gt']      # (145, 145) ndarray
        # Min-max normalisation of the whole cube to [0, 1].
        Indian = (Indian - np.min(Indian)) / (np.max(Indian) - np.min(Indian))
        nBand = Indian.shape[2]
        # FIX: test_end() reads self.nBand, but it was never stored before
        # (it raised AttributeError at network construction time).
        self.nBand = nBand

        num_class = int(np.max(GroundTruth))        # 16 classes for Indian Pines

        HalfWidth = 16
        Wid = 2 * HalfWidth                         # spatial patch size (32)

        # Zero-pad image and ground truth so every labelled pixel has a full
        # HalfWidth neighbourhood.
        Indian = utils.zeroPadding_3D(Indian, HalfWidth + 1, HalfWidth, HalfWidth + 1, HalfWidth)
        G = utils.zeroPadding_2D(GroundTruth, HalfWidth + 1, HalfWidth, HalfWidth + 1, HalfWidth)
        self.G = G
        [Row, Column] = np.nonzero(G)               # coordinates of labelled pixels
        self.nSample = np.size(Row)                 # e.g. 10249 for Indian Pines
        self.HalfWidth = HalfWidth

        # Group the padded-grid coordinates of labelled pixels by class.
        labels_rows = [[] for _ in range(num_class)]
        labels_columns = [[] for _ in range(num_class)]
        for i in range(num_class):
            for isample in range(self.nSample):
                if G[Row[isample], Column[isample]] == i + 1:
                    labels_rows[i].append(Row[isample])
                    labels_columns[i].append(Column[isample])

        imdb_train = {}
        imdb_valid = {}
        imdb_test = {}
        T_curr_pos = 0
        V_curr_pos = 0
        curr_pos = 0
        imdb_train['data'] = np.zeros([Wid, Wid, nBand, self.cache_placeholder], dtype=np.float32)
        imdb_train['Labels'] = np.zeros([self.cache_placeholder], dtype=np.int64)
        imdb_valid['data'] = np.zeros([Wid, Wid, nBand, self.cache_placeholder], dtype=np.float32)
        imdb_valid['Labels'] = np.zeros([self.cache_placeholder], dtype=np.int64)
        imdb_test['data'] = np.zeros([Wid, Wid, nBand, self.nSample], dtype=np.float32)
        imdb_test['Labels'] = np.zeros([self.nSample], dtype=np.int64)
        for i in range(self.num_class):
            # Shuffle this class's pixel coordinates.
            order = np.random.permutation(len(labels_rows[i]))
            _rows = [labels_rows[i][j] for j in order]
            _columns = [labels_columns[i][j] for j in order]
            splid_0 = int(self.proptionTest * len(_rows))
            # FIX: use an explicit length instead of _rows[:-splid_0], which
            # yields an EMPTY train list when splid_0 == 0 (tiny classes).
            n_train = len(_rows) - splid_0
            select_rows_train = _rows[:n_train]
            select_columns_train = _columns[:n_train]
            # Validation takes as many samples as train, from the tail of the
            # remaining 95%; everything in between is the test set.
            splid_1 = len(select_rows_train)
            tail_rows = _rows[n_train:]
            tail_columns = _columns[n_train:]
            select_rows_valid = tail_rows[-splid_1:]
            select_columns_valid = tail_columns[-splid_1:]
            select_rows_test = tail_rows[:-splid_1]
            select_columns_test = tail_columns[:-splid_1]
            for n in range(len(select_rows_train)):
                r, c = select_rows_train[n], select_columns_train[n]
                imdb_train['data'][:, :, :, T_curr_pos] = Indian[r - HalfWidth:r + HalfWidth,
                                                                 c - HalfWidth:c + HalfWidth, :]
                imdb_train['Labels'][T_curr_pos] = G[r, c]
                T_curr_pos += 1
            for m in range(len(select_rows_valid)):
                r, c = select_rows_valid[m], select_columns_valid[m]
                imdb_valid['data'][:, :, :, V_curr_pos] = Indian[r - HalfWidth:r + HalfWidth,
                                                                 c - HalfWidth:c + HalfWidth, :]
                imdb_valid['Labels'][V_curr_pos] = G[r, c]
                V_curr_pos += 1
            for s in range(len(select_rows_test)):
                # FIX: the original indexed the test coordinates with the stale
                # loop variable `m`, so every test patch of a class was the
                # same (last validation-loop) pixel.
                r, c = select_rows_test[s], select_columns_test[s]
                imdb_test['data'][:, :, :, curr_pos] = Indian[r - HalfWidth:r + HalfWidth,
                                                              c - HalfWidth:c + HalfWidth, :]
                imdb_test['Labels'][curr_pos] = G[r, c]
                curr_pos += 1
        # Trim the pre-allocated buffers to the filled prefix and shift the
        # labels from 1..16 down to 0..15 (the write cursors already mark the
        # end of valid data, so no nonzero() scan is needed).
        imdb_train['data'] = imdb_train['data'][:, :, :, :T_curr_pos]
        imdb_train['Labels'] = imdb_train['Labels'][:T_curr_pos] - 1
        imdb_valid['data'] = imdb_valid['data'][:, :, :, :V_curr_pos]
        imdb_valid['Labels'] = imdb_valid['Labels'][:V_curr_pos] - 1
        imdb_test['data'] = imdb_test['data'][:, :, :, :curr_pos]
        imdb_test['Labels'] = imdb_test['Labels'][:curr_pos] - 1
        self.imdb_train = imdb_train
        self.imdb_valid = imdb_valid
        self.imdb_test = imdb_test

    def test_end(self, Best_individual):
        """Retrain ``Best_individual`` from scratch, test it, and save a map.

        Trains for ``self.EPOCH`` epochs, checkpoints the lowest validation
        loss over the last 20% of epochs, then reports OA / mean AA / Kappa
        on the test split and writes a colour classification map to disk.

        Args:
            Best_individual: dict with 'encode_layers', 'outchannels', 'size'.
        """
        parser = argparse.ArgumentParser(description='IN-HSI classification by PyTorch')
        parser.add_argument('--outf', default='./test_end_individual._model/',
                            help='folder to output images and model checkpoints')
        args = parser.parse_args()
        if not os.path.exists(args.outf):
            os.makedirs(args.outf)
        print("The optimal individual information is as follows, and the accuracy has not been updated yet：")
        print(Best_individual)
        print("Starting from scratch and using test sets to verify accuracy")
        encode_information = Best_individual['encode_layers']
        End_out_channels = Best_individual['outchannels']
        size = Best_individual['size']
        net = GlobalNet(encode_information, End_out_channels, size, self.nBand, self.num_class).to(self.device)
        print("Start Training, Globalnet!")
        optimizer = torch.optim.Adam(net.parameters(), lr=self.lr,
                                     weight_decay=self.weight_decay)
        # Halve the learning rate five times over the run.
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, self.EPOCH // 5, 0.5)
        min_val_obj = 100
        for epoch in range(self.pre_epoch, self.EPOCH):
            train_dataset = Matcifar(datasets=self.imdb_train)
            valid_dataset = Matcifar(datasets=self.imdb_valid)

            train_queue = torch.utils.data.DataLoader(train_dataset, batch_size=self.BATCH_SIZE,
                                                      shuffle=True, num_workers=0)
            valid_queue = torch.utils.data.DataLoader(valid_dataset, batch_size=self.BATCH_SIZE,
                                                      shuffle=True, num_workers=0)
            tic = time.time()
            top1_avg_train, objs_avg_train, tar_train, pre_train = self.train(train_queue, net, optimizer)

            top1_avg_valid, objs_avg_valid, tar_valid, pre_valid = self.infer(valid_queue, net)
            toc = time.time()
            scheduler.step()
            print("[Epoch: %d]|train_acc:%f, train_loss:%f, valid_acc:%f, valid_loss:%f, time = %f" %
                  (epoch + 1, top1_avg_train, objs_avg_train, top1_avg_valid, objs_avg_valid, toc - tic))
            # Checkpoint the best validation loss seen in the last 20% of epochs.
            if epoch > self.EPOCH * 0.8 and objs_avg_valid < min_val_obj:
                min_val_obj = objs_avg_valid
                utils.save(net, './test_end_individual._model/Best_net.pth')

            # Final epoch: reload the best checkpoint and run the test split.
            if epoch == (self.EPOCH - 1):
                print("Waiting Test!" + '\n')
                utils.load(net, './test_end_individual._model/Best_net.pth')
                labels_test = np.array([], dtype=np.int64)
                predict_test = np.array([], dtype=np.int64)

                test_dataset = Matcifar(datasets=self.imdb_test)
                test_queue = torch.utils.data.DataLoader(test_dataset, batch_size=self.BATCH_SIZE,
                                                         shuffle=False, num_workers=0)

                top1_test, objs_test, tar_test, pre_test = self.infer(test_queue, net)

                labels_test = np.append(labels_test, tar_test)
                predict_test = np.append(predict_test, pre_test)

                print('The Best individual |test_datasets | top1.avg:%f, objs.avg:%f' % (top1_test, objs_test))
                matrix_ = confusion_matrix(labels_test, predict_test)
                OA_, AA_mean_, Kappa_, AA_ = self.cal_results(matrix_)
                print('OA：%f, AA_mean：%f, Kappa：%f ,Acuracy_per_class: %s' % (OA_, AA_mean_, Kappa_, AA_))

                # Back to 1-based labels so 0 can mean "unlabelled" in the map.
                labels_test = labels_test + 1
                predict_test = predict_test + 1

                # Un-pad the ground truth and paint the labelled positions with
                # the test-label sequence (background stays 0).
                MASK = self.G[self.HalfWidth + 1: -1 - self.HalfWidth + 1,
                              self.HalfWidth + 1: -1 - self.HalfWidth + 1]
                NEW_MASK = copy.deepcopy(MASK)
                NEW_MASK = np.hstack(NEW_MASK)
                i = 0
                for index, element in enumerate(NEW_MASK):
                    if element != 0:
                        NEW_MASK[index] = labels_test[i]
                        i = i + 1

                # RGB palette per class id (0 = background / unlabelled).
                palette = {
                    0: (0, 0, 0), 1: (77, 64, 0), 2: (191, 191, 191), 3: (255, 0, 0),
                    4: (0, 255, 255), 5: (255, 102, 204), 6: (255, 153, 0), 7: (1, 0, 255),
                    8: (255, 0, 255), 9: (0, 0, 128), 10: (128, 128, 128), 11: (128, 0, 0),
                    12: (128, 128, 0), 13: (1, 128, 0), 14: (0, 128, 128), 15: (0, 0, 128),
                    16: (128, 0, 128),
                }
                y = np.zeros((NEW_MASK.shape[0], 3))
                for index, item in enumerate(NEW_MASK):
                    # Unknown ids keep the zero (black) row, as before.
                    y[index] = np.array(palette.get(int(item), (0, 0, 0))) / 255.

                y_re = np.reshape(y, (MASK.shape[0], MASK.shape[1], 3))

                self.classification_map(y_re, MASK, 24,
                                        "C:/Users/Administrator/PycharmProjects/LSY/Design_YG/Control_/run_Indian/w.png")

    def train(self, train_queue, model, optimizer):
        """Run one training epoch.

        Returns:
            (top1_avg, loss_avg, targets, predictions) — averages over the
            epoch plus concatenated per-sample targets/predictions.
        """
        objs = utils.AvgrageMeter()
        top1 = utils.AvgrageMeter()
        tar = np.array([])
        pre = np.array([])
        model.train()
        for data in train_queue:
            image_, label_ = data
            # NOTE: deprecated autograd.Variable wrapper dropped (no-op since torch 0.4).
            image_ = image_.to(self.device)
            label_ = label_.to(self.device)
            optimizer.zero_grad()
            n_ = image_.size(0)
            outputs_ = model(image_)
            loss_ = self.criterion(outputs_, label_)
            loss_.backward()
            # Clip the gradient norm to stabilise training.
            nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
            optimizer.step()
            prec_, t_, p_ = utils.accuracy(outputs_, label_, topk=(1,))
            objs.update(loss_.item(), n_)
            top1.update(prec_[0].item(), n_)
            tar = np.append(tar, t_.data.cpu().numpy())
            pre = np.append(pre, p_.data.cpu().numpy())
        return top1.avg, objs.avg, tar, pre

    def infer(self, valid_queue, model):
        """Evaluate ``model`` on a loader without gradients.

        Returns:
            (top1_avg, loss_avg, targets, predictions) as in ``train``.
        """
        objs = utils.AvgrageMeter()
        top1 = utils.AvgrageMeter()
        model.eval()
        tar = np.array([])
        pre = np.array([])
        with torch.no_grad():
            for data in valid_queue:
                image, label = data
                image = image.to(self.device)
                label = label.to(self.device)
                n = image.size(0)
                outputs = model(image)
                loss = self.criterion(outputs, label)
                prec1_, t_, p_ = utils.accuracy(outputs, label, topk=(1,))
                objs.update(loss.item(), n)
                top1.update(prec1_[0].item(), n)
                tar = np.append(tar, t_.data.cpu().numpy())
                pre = np.append(pre, p_.data.cpu().numpy())

        return top1.avg, objs.avg, tar, pre

    def cal_results(self, matrix):
        """Compute metrics from a confusion matrix.

        Returns:
            (OA, AA_mean, Kappa, AA): overall accuracy, mean per-class
            accuracy, Cohen's kappa, and the per-class accuracy vector.
        """
        shape = np.shape(matrix)
        number = 0          # correctly classified samples (trace of the matrix)
        chance = 0          # sum of row-total * column-total, for Kappa's p_e
        # FIX: np.float was removed in NumPy >= 1.24; use the builtin float.
        AA = np.zeros([shape[0]], dtype=float)
        for i in range(shape[0]):
            number += matrix[i, i]
            row_sum = np.sum(matrix[i, :])
            # Guard against classes with no samples (empty row).
            AA[i] = matrix[i, i] / row_sum if row_sum != 0 else 0
            chance += row_sum * np.sum(matrix[:, i])
        OA = number / np.sum(matrix)
        AA_mean = np.mean(AA)
        pe = chance / (np.sum(matrix) ** 2)   # expected agreement by chance
        Kappa = (OA - pe) / (1 - pe)
        return OA, AA_mean, Kappa, AA

    def classification_map(self, map, groundTruth, dpi, savePath):
        """Save ``map`` (H x W x 3 RGB array in [0, 1]) as a border-less image.

        ``groundTruth`` only supplies the figure's aspect ratio.
        """
        fig = plt.figure(frameon=False)
        fig.set_size_inches(groundTruth.shape[1] * 2.0 / dpi, groundTruth.shape[0] * 2.0 / dpi)

        # One axes filling the whole figure, with all decorations hidden.
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        fig.add_axes(ax)

        ax.imshow(map, aspect='auto')
        fig.savefig(savePath, dpi=dpi)

        return 0

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicBlock(nn.Module):
    """Residual block whose main branch is a dilated (conv_type=1) or a
    depthwise-separable (conv_type=2) convolution.

    Padding is chosen so a stride-1 block preserves spatial size; the
    shortcut becomes a 1x1 projection whenever the stride or channel count
    changes, otherwise it is the identity.
    """

    def __init__(self, in_planes, planes, conv_type, kernel_size, stride=1):
        super(BasicBlock, self).__init__()
        self.expansion = 1
        if conv_type == 1:
            # DilConv: depthwise conv with dilation 2 has an effective kernel
            # of 2*k - 1, so padding k-1 keeps spatial size at stride 1.
            # (Generalized from the original hard-coded k in {3, 5}:
            # k=3 -> 2, k=5 -> 4, same values as before.)
            self.padding = kernel_size - 1
            self.Conv = nn.Sequential(
                nn.ReLU(inplace=False),
                nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, stride=stride,
                          padding=self.padding, dilation=2,
                          groups=in_planes, bias=False),
                nn.Conv2d(in_planes, planes, kernel_size=1, padding=0, bias=False),
                nn.BatchNorm2d(planes, affine=False),
            )
        elif conv_type == 2:
            # SepConv: plain depthwise conv; padding (k-1)//2 preserves size
            # at stride 1 (k=3 -> 1, k=5 -> 2, same values as before).
            self.padding = (kernel_size - 1) // 2
            self.Conv = nn.Sequential(
                nn.ReLU(inplace=False),
                nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, stride=stride,
                          padding=self.padding,
                          groups=in_planes, bias=False),
                nn.Conv2d(in_planes, planes, kernel_size=1, padding=0, bias=False),
                nn.BatchNorm2d(planes, affine=False),
            )
        else:
            # FIX: fail fast instead of leaving self.Conv undefined (the
            # original deferred the error to an AttributeError in forward()).
            raise ValueError('conv_type must be 1 (DilConv) or 2 (SepConv), got %r' % (conv_type,))
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.ReLU(inplace=False),
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        """Main branch plus the (possibly projected) shortcut."""
        out = self.Conv(x)
        out += self.shortcut(x)
        return out

class BasicUnit(nn.Module):
    """A stack of ``amount`` BasicBlocks sharing one conv type / kernel size.

    The first block maps ``in_channel`` to ``out_channel``; every following
    block keeps ``out_channel`` channels.
    """

    def __init__(self, in_channel, out_channel, amount, conv_type, kernel_size):
        super(BasicUnit, self).__init__()
        self.type = 1  # unit-kind tag (1 = conv unit); currently unused
        self.inchannel = in_channel
        self.conv_type_ = conv_type
        self.kernel_size_ = kernel_size
        self.layer = self.make_layer(BasicBlock, out_channel, amount, conv_type, kernel_size, stride=1)

    def make_layer(self, block, channels, num_blocks, _conv_type_, _kernel_size_, stride):
        """Chain ``num_blocks`` blocks; the first gets ``stride``, the rest 1."""
        built = []
        for step_stride in [stride] + [1] * (num_blocks - 1):
            built.append(block(self.inchannel, channels, _conv_type_, _kernel_size_, step_stride))
            self.inchannel = channels  # subsequent blocks consume `channels` inputs
        return nn.Sequential(*built)

    def forward(self, x):
        """Run the input through the whole block stack."""
        return self.layer(x)

class PoolUnit(nn.Module):
    """Downsampling unit: 3x3 pooling with stride 2 halves the feature map.

    ``pool_type`` selects the operator: 1 -> average pooling,
    2 -> max pooling.
    """

    def __init__(self, pool_type):
        super(PoolUnit, self).__init__()
        self.type = 2  # unit-kind tag (2 = pooling unit); currently unused
        self.pool_type = pool_type
        # Both operators use the same geometry and halve the spatial size.
        if pool_type == 1:
            self.select_pooltype = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        elif pool_type == 2:
            self.select_pooltype = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        """Apply the selected pooling operator."""
        return self.select_pooltype(x)

if __name__ == '__main__':
    # Evolution / training hyper-parameters.
    params = {
        'prob_crossover': 0.9,  # 0.9
        'prob_mutation': 0.2,  # 0.2 # to test mutation component

        'BasicBlock_min_limit': 3,  # 3 10
        'BasicBlock_max_limit': 10,  # at most 10 ResBlocks per unit
        'BU_outchannels_list': [64, 128, 256],
        'BU_conv_type_list': [1, 2],  # 1 and 2 is symbol of Dilconv and Sepconv respectively
        'BU_kernel_size_list': [3, 5],

        'BU_minlimit': 1,
        'BU_maxlimit': 4,

        'PU_minlimit': 1,
        'PU_maxlimit': 4,  # each pooling unit halves the feature-map size

        'init_max_len': 8,
        'off_max_len': 12,  # could be larger, e.g. 15 or 20
        'init_inchannels': 200,
        'Max_out_channels': 64,  # used for initial sequences and RB-internal mutation

        'init_size': 32,  # 64  32
        'pop_size': 20,
        'max_gen': 10,
        'EPOCH': 5,  # 150
        'BATCH_SIZE': 32,
        'num_class': 16,
    }

    individual = {}

    # Hand-crafted genotype of the individual evaluated end-to-end below.
    size = [32, 32, 32, 16, 8, 4, 2, None, None, None, None, None, None, None, None, None, None, None, None, None]
    inchannels = [200, 256, 128, 128, 128, 128]
    outchannels = [256, 128, 128, 128, 128, 128]
    pool_type = [1, 1, 2, 2, 2, 1]
    BasicBlock_amount = [5, 9, 4, 4, 4, 9]
    BU_outchannels = [256, 128, 128, 128, 128, 128]
    BU_conv_type = [1, 1, 2, 2, 2, 1]
    BU_kernel_size = [5, 5, 3, 3, 3, 5]
    order_type = [1, 1, 2, 2, 2, 2]

    encode_layers = []  # store per-layer encoded information
    i = 0

    # Build the module list: order_type 1 -> BasicUnit, 2 -> PoolUnit.
    # FIX: the loop variable no longer shadows the builtin `type`.
    for unit_type in order_type:
        if unit_type == 1:
            outchannels[i] = BU_outchannels[i]
            encode_layers.append(BasicUnit(int(inchannels[i]), int(outchannels[i]),
                                           int(BasicBlock_amount[i]), int(BU_conv_type[i]),
                                           int(BU_kernel_size[i])))
            i = i + 1

        elif unit_type == 2:
            encode_layers.append(PoolUnit(int(pool_type[i])))
            i = i + 1

    individual['inchannels'] = inchannels
    individual['outchannels'] = outchannels
    individual['pool_type'] = pool_type
    individual['BasicBlock_amount'] = BasicBlock_amount
    individual['BU_outchannels'] = BU_outchannels
    individual['BU_conv_type'] = BU_conv_type
    individual['BU_kernel_size'] = BU_kernel_size

    individual['acc'] = -1.0
    individual['size'] = size  # used by the fully-connected layer sizing
    individual['encode_layers'] = encode_layers
    individual['order_type'] = order_type

    classification = Classification(params)
    classification.test_end(individual)