﻿# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader

import utils
import math
import numpy as np
# import data_loader
import argparse
from sklearn import preprocessing
from GlobalNet import GlobalNet
import scipy.io as sio
import os
import time

from sklearn import metrics
from sklearn.metrics import confusion_matrix
from torch.utils.data.sampler import SubsetRandomSampler


class Matcifar(Dataset):
    """Thin torch Dataset over an imdb-style dict.

    Expects ``datasets['data']`` with the sample axis last
    (W, H, band, N) and ``datasets['Labels']`` of length N; samples are
    exposed in (band, H, W) layout.
    """

    def __init__(self, datasets):
        labels = datasets['Labels']
        # Move the sample axis to the front: (W, H, band, N) -> (N, band, H, W).
        self.x_data = datasets['data'].transpose(3, 2, 1, 0)
        self.y_data = labels
        self.len = len(labels)

    def __getitem__(self, index):
        sample = self.x_data[index]
        target = self.y_data[index]
        return sample, target

    def __len__(self):
        return self.len


class FitnessEvaluate(object):
    """Evaluates the fitness (classification accuracy) of candidate networks.

    On construction this loads the Indian Pines hyperspectral cube from
    hardcoded .mat paths, min-max normalizes it, and carves per-class
    32x32xnBand patches into train / valid / test dictionaries
    (``imdb_train`` / ``imdb_valid`` / ``imdb_test``) consumed by the
    training and evaluation methods.
    """

    def __init__(self, params):
        # Use the first GPU when available, otherwise fall back to CPU.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        self.Best_acc = -1.0  # best validation accuracy seen across all evaluated individuals
        self.pre_epoch = 0  # starting epoch index
        self.params = params

        self.cutout = False  # TODO: move into params
        self.cutout_length = 2
        self.num_cut = 10
        self.EPOCH = params['EPOCH']
        self.BATCH_SIZE = params['BATCH_SIZE']
        self.num_class = params['num_class']

        self.proptionTest = 0.95  # 10% 90%
        # self.proptionVal = 0.5  # 50% 50%

        # Preallocated per-split sample capacity; trimmed to actual size below.
        self.cache_placeholder = 1000
        # Loss function
        criterion = nn.CrossEntropyLoss()  # cross-entropy, the usual choice for multi-class classification
        self.criterion = criterion
        # self.nTrain = 200  # TODO: move into params
        # self.nValid = 100  # TODO: move into params
        self.grad_clip = 5
        self.lr = 0.01  # 0.016 0.025
        self.weight_decay = 3e-4
        # Create (truncate) the four log files and write their headers.
        self.init_file()

        image_file = '/home/liangshaoyang/Documents/PycharmProjects/IN/Indian_pines_corrected.mat'
        label_file = '/home/liangshaoyang/Documents/PycharmProjects/IN/Indian_pines_gt.mat'
        image = sio.loadmat(image_file)  # shape=(145,145,200)
        Indian = image['indian_pines_corrected']  # shape=(145,145,200) ndarray
        label = sio.loadmat(label_file)
        GroundTruth = label['indian_pines_gt']  # shape=(145,145) ndarray
        Indian = (Indian - np.min(Indian)) / (np.max(Indian) - np.min(Indian))  # min-max normalize to [0, 1]
        [nRow, nColumn, nBand] = Indian.shape  # 145 145 200
        self.nBand = nBand
        self.num_class = int(np.max(GroundTruth))  # 16
        HSI_CLASSES = self.num_class  # 16

        HalfWidth = 16  # half of the spatial patch size
        Wid = 2 * HalfWidth  # 32

        [row, col] = GroundTruth.shape  # 145 145

        # Zero-pad so patches centered near the border stay inside the array.
        Indian = utils.zeroPadding_3D(Indian, HalfWidth + 1, HalfWidth, HalfWidth + 1, HalfWidth)
        G = utils.zeroPadding_2D(GroundTruth, HalfWidth + 1, HalfWidth, HalfWidth + 1, HalfWidth)

        [Row, Column] = np.nonzero(G)  # coordinates of all labeled pixels in the padded map
        nSample = np.size(Row)  # 10249  #7128

        # Collect the (row, column) coordinates of labeled pixels, per class.
        labels_rows, labels_columns = [[] for i in range(self.num_class)], [[] for i in range(self.num_class)]
        for i in range(self.num_class):  # rows and columns of all-class-labels
            for isample in range(nSample):
                if G[Row[isample], Column[isample]] == i + 1:
                    labels_rows[i].append(Row[isample])
                    labels_columns[i].append(Column[isample])
        imdb_train, imdb_valid, imdb_test = {}, {}, {}
        imdb_train['data'], imdb_train['Labels'] = np.zeros([Wid, Wid, nBand, self.cache_placeholder],
                                                            dtype=np.float32), np.zeros([self.cache_placeholder],
                                                                                        dtype=np.int64)
        imdb_valid['data'], imdb_valid['Labels'] = np.zeros([Wid, Wid, nBand, self.cache_placeholder],
                                                            dtype=np.float32), np.zeros([self.cache_placeholder],
                                                                                        dtype=np.int64)
        imdb_test['data'], imdb_test['Labels'] = np.zeros([Wid, Wid, nBand, nSample], dtype=np.float32), np.zeros(
            [nSample], dtype=np.int64)
        select_rows_train, select_columns_train = {}, {}
        select_rows_valid, select_columns_valid = {}, {}
        select_rows_test, select_columns_test = {}, {}
        # Per class: shuffle the coordinates, then split into train / valid / test.
        for i in range(self.num_class):
            index_list = []
            _rows = []
            _columns = []
            for index, value in enumerate(labels_rows[i]):
                index_list.append(index)
            random_index_list = np.random.permutation(index_list)
            for index_ in random_index_list:
                _rows.append(labels_rows[i][index_])
                _columns.append(labels_columns[i][index_])

            # splid_0 ~= proptionTest of the class; the remaining ~5% head of
            # the shuffle becomes the train set.
            splid_0 = int(self.proptionTest * len(_rows))
            select_rows_train[i] = _rows[: -splid_0]
            select_columns_train[i] = _columns[: -splid_0]

            # Valid gets as many samples as train (taken from the tail of the
            # held-out portion); the rest of the held-out portion is test.
            # NOTE(review): if splid_0 == 0 (a class with very few samples),
            # the ``[: -0]`` slices make train and test empty and valid take
            # everything — confirm this edge case is intended.
            splid_1 = len(select_rows_train[i])
            select_rows_valid[i] = _rows[-splid_0:][-splid_1:]
            select_columns_valid[i] = _columns[-splid_0:][-splid_1:]

            select_rows_test[i] = _rows[-splid_0:][:-splid_1]
            select_columns_test[i] = _columns[-splid_0:][:-splid_1]
        # Flatten the per-class splits into single coordinate lists.
        train_rows_indices, train_columns_indices = [], []
        valid_rows_indices, valid_columns_indices = [], []
        test_rows_indices, test_columns_indices = [], []
        for i in range(self.num_class):
            train_rows_indices += select_rows_train[i]
            train_columns_indices += select_columns_train[i]
            valid_rows_indices += select_rows_valid[i]
            valid_columns_indices += select_columns_valid[i]
            test_rows_indices += select_rows_test[i]
            test_columns_indices += select_columns_test[i]
        # Shuffle each split so the classes are interleaved.
        RandPerm1, RandPerm2, RandPerm3 = np.random.permutation(len(train_rows_indices)), np.random.permutation(
            len(valid_rows_indices)), np.random.permutation(len(test_rows_indices))
        # Cut a (Wid x Wid x nBand) patch around every selected pixel.
        for n in range(len(train_rows_indices)):
            yy1 = (Indian[train_rows_indices[RandPerm1[n]] - HalfWidth:train_rows_indices[RandPerm1[n]] + HalfWidth,
                   train_columns_indices[RandPerm1[n]] - HalfWidth:train_columns_indices[RandPerm1[n]] + HalfWidth, :])
            if self.cutout:
                # Optional cutout augmentation, applied to training patches only.
                xx1 = utils.cutout(yy1, self.cutout_length, self.num_cut)
                imdb_train['data'][:, :, :, n] = xx1
            else:
                imdb_train['data'][:, :, :, n] = yy1
            imdb_train['Labels'][n] = G[train_rows_indices[RandPerm1[n]], train_columns_indices[RandPerm1[n]]]
        for m in range(len(valid_rows_indices)):
            imdb_valid['data'][:, :, :, m] = (
            Indian[valid_rows_indices[RandPerm2[m]] - HalfWidth:valid_rows_indices[RandPerm2[m]] + HalfWidth,
            valid_columns_indices[RandPerm2[m]] - HalfWidth:valid_columns_indices[RandPerm2[m]] + HalfWidth, :])
            imdb_valid['Labels'][m] = G[valid_rows_indices[RandPerm2[m]], valid_columns_indices[RandPerm2[m]]]
        for s in range(len(test_rows_indices)):
            imdb_test['data'][:, :, :, s] = (
            Indian[test_rows_indices[RandPerm3[s]] - HalfWidth:test_rows_indices[RandPerm3[s]] + HalfWidth,
            test_columns_indices[RandPerm3[s]] - HalfWidth:test_columns_indices[RandPerm3[s]] + HalfWidth, :])
            imdb_test['Labels'][s] = G[test_rows_indices[RandPerm3[s]], test_columns_indices[RandPerm3[s]]]
        # Trim the preallocated buffers down to the slots actually filled.
        index_T = np.max(np.nonzero(imdb_train['Labels']))  # last non-zero label slot
        imdb_train['data'], imdb_train['Labels'] = imdb_train['data'][:, :, :, : index_T + 1], (
                    imdb_train['Labels'][: index_T + 1] - 1)  # label -1 from 1-16 to 0-15
        index_V = np.max(np.nonzero(imdb_valid['Labels']))
        imdb_valid['data'], imdb_valid['Labels'] = imdb_valid['data'][:, :, :, : index_V + 1], (
                    imdb_valid['Labels'][: index_V + 1] - 1)
        index_ = np.max(np.nonzero(imdb_test['Labels']))
        imdb_test['data'], imdb_test['Labels'] = imdb_test['data'][:, :, :, : index_ + 1], (
                    imdb_test['Labels'][: index_ + 1] - 1)
        # f2.write('Data is OK.' + '\n')
        self.imdb_train, self.imdb_valid, self.imdb_test = imdb_train, imdb_valid, imdb_test

    def train_and_eval(self, individuals_, gen_now, mutation_flag=0):
        """Train and validate every candidate network in ``individuals_``.

        Each individual's decoded architecture is trained for ``self.EPOCH``
        epochs; its final-epoch validation overall accuracy is written back
        into ``individual_['acc']``.  Whenever an individual matches or beats
        ``self.Best_acc``, its weights and metrics are checkpointed.

        Args:
            individuals_: list of dicts with 'encode_layers', 'outchannels',
                'size' and 'acc' keys.
            gen_now: generation index, used for logging and checkpoint names.
            mutation_flag: 0 = plain generation evaluation, 1 = post-mutation
                evaluation; any other value skips individuals that already
                have acc > 0 (they were not touched by mutation).

        Returns:
            The same ``individuals_`` list with updated 'acc' values.
        """
        # NOTE(review): parse_args() consumes sys.argv inside a library
        # method; kept for compatibility with the existing run scripts.
        parser = argparse.ArgumentParser(description='IN-HSI classification by PyTorch')  # py file.py -h
        parser.add_argument('--outf', default='./superior_Individual._model/',  # py file.py --outf
                            help='folder to better individual model in train step')
        args = parser.parse_args()
        with open("superior_individual_information.txt", "a") as f_0:
            f_0.write(
                "The %sst population will be evaluated，The updates of the better individual information are shown below" % (
                    gen_now) + '\n')
            # BUG FIX: the original incremented ``count`` only at the end of
            # the loop body, so the ``continue`` path (skipped individuals)
            # never advanced the counter and later individuals (and their
            # checkpoint filenames) were mis-numbered.  ``enumerate`` numbers
            # every individual correctly.
            for count, individual_ in enumerate(individuals_, start=1):
                encode_information = individual_['encode_layers']
                End_out_channels = individual_['outchannels']
                size = individual_['size']
                # Decode this individual's genome into a concrete network.
                net = GlobalNet(encode_information, End_out_channels, size, self.nBand, self.num_class).to(self.device)
                if not os.path.exists(args.outf):
                    os.makedirs(args.outf)
                with open("Individuals_train_log.txt", "a") as f1:
                    print('\n' + "Start Training, Globalnet!")
                    f1.write('\n' + "Start Training, Globalnet!" + '\n')
                    if mutation_flag == 0:
                        print("The %sst individual from %sst generation population will be evaluate" % (count, gen_now))
                        f1.write("The %sst individual from %sst generation population will be evaluate" % (
                        count, gen_now) + '\n')
                    elif mutation_flag == 1:
                        print("The %sst individual from %sst population after mutation will be evaluate" % (
                        count, gen_now))
                        f1.write(
                            "The %sst individual from %sst generation population after mutation will be evaluate" % (
                            count, gen_now) + '\n')
                    elif float(individual_['acc']) > 0:
                        # Not involved in mutation and already evaluated: skip.
                        print(
                            "The %sst individual from %sst generation population without involving in mutaion process don't need evaluate " % (
                                count, gen_now))
                        f1.write(
                            "The %sst individual from %sst generation population without involving in mutaion process don't need evaluate " % (
                                count, gen_now) + '\n')
                        continue
                    optimizer = torch.optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
                    # Halve the learning rate five times over the run.
                    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, self.EPOCH // 5, 0.5)

                    # Datasets and loaders are loop-invariant: DataLoader
                    # reshuffles on every iteration, so building them once
                    # outside the epoch loop is equivalent and avoids
                    # re-wrapping the arrays each epoch.
                    train_dataset = Matcifar(datasets=self.imdb_train)  # 32 32 520
                    valid_dataset = Matcifar(datasets=self.imdb_valid)  # 32 32 520
                    train_queue = torch.utils.data.DataLoader(train_dataset, batch_size=self.BATCH_SIZE,
                                                              shuffle=True, pin_memory=True, num_workers=0)
                    valid_queue = torch.utils.data.DataLoader(valid_dataset, batch_size=self.BATCH_SIZE,
                                                              shuffle=True, pin_memory=True, num_workers=0)
                    predict_ = np.array([], dtype=np.int64)
                    labels_ = np.array([], dtype=np.int64)

                    for epoch in range(self.pre_epoch, self.EPOCH):
                        tic = time.time()
                        top1_avg_train, objs_avg_train, tar_train, pre_train = self.train(train_queue, net, optimizer)

                        top1_avg_valid, objs_avg_valid, tar_valid, pre_valid = self.infer(valid_queue, net)
                        toc = time.time()

                        scheduler.step()
                        print("[Epoch: %d]|train_acc:%f, train_loss:%f, valid_acc:%f, valid_loss:%f, time = %f" %
                              (epoch + 1, top1_avg_train, objs_avg_train, top1_avg_valid, objs_avg_valid, toc - tic))
                        f1.write("[Epoch: %d]|train_acc:%f, train_loss:%f, valid_acc:%f, valid_loss:%f, time = %f" %
                                 (epoch + 1, top1_avg_train, objs_avg_train, top1_avg_valid, objs_avg_valid,
                                  toc - tic) + '\n')

                    # Fitness is the overall accuracy of the FINAL epoch's
                    # validation pass.
                    labels_ = np.append(labels_, tar_valid)
                    predict_ = np.append(predict_, pre_valid)

                    OA_valid = metrics.accuracy_score(predict_, labels_)
                    acc = OA_valid
                    individual_['acc'] = float(acc)
                    matrix_valid = metrics.confusion_matrix(predict_, labels_)
                    AA_valid, AA_mean_valid = utils.AA_andEachClassAccuracy(matrix_valid)
                    Kappa_valid = metrics.cohen_kappa_score(predict_, labels_)

                    print('OA：%f, AA_mean：%f, Kappa：%f , Acuracy_per_class: %s' % (OA_valid, AA_mean_valid,
                                                                                   Kappa_valid, AA_valid))
                    f1.write('OA：%f, AA_mean：%f, Kappa：%f , Acuracy_per_class: %s' % (OA_valid, AA_mean_valid,
                                                                                      Kappa_valid, AA_valid) + '\n')
                    print('Updating superior model information......')
                    f1.write('Updating superior model information......' + '\n')
                    print("Test Finished, TotalEPOCH=%d" % self.EPOCH + '\n')
                    f1.write('\n' + "Test Finished, TotalEPOCH=%d" % self.EPOCH + '\n')
                    f1.flush()
                    # Checkpoint any individual that ties or beats the best
                    # accuracy seen so far.
                    if float(acc) >= float(self.Best_acc):
                        self.Best_acc = float(acc)
                        torch.save(net.state_dict(), '%s/net_%03d_%03d.pth' % (args.outf, gen_now, count))
                        f_0.write(
                            'superior_individual_now| param size = %fMB' % utils.count_parameters_in_MB(net) + '\n')
                        f_0.write('OA：%f, AA_mean：%f, Kappa：%f , Acuracy_per_class: %s' % (OA_valid, AA_mean_valid,
                                                                                           Kappa_valid,
                                                                                           AA_valid) + '\n')
                        for key, value in individual_.items():
                            if key == 'acc':
                                value = float(value)
                            f_0.write(str(key) + ':')
                            f_0.write(str(value))
                            f_0.write('\n')
                            f_0.flush()
        return individuals_

    def test_end(self, Best_individual):
        """Retrain the best individual from scratch and report test-set metrics.

        Trains for ``self.EPOCH`` epochs, checkpoints the weights with the
        lowest validation loss seen in the final ~20% of epochs, reloads that
        checkpoint on the last epoch, and evaluates it on ``imdb_test``.
        ``Best_individual['acc']`` is updated with the test overall accuracy,
        and all metrics are appended to the log files.
        """
        # Command-line style configuration for the checkpoint output folder.
        parser = argparse.ArgumentParser(description='IN-HSI classification by PyTorch')  # py file.py -h
        parser.add_argument('--outf', default='./Best_individual._model/',  # py file.py --outf
                            help='folder to output images and model checkpoints')  # output folder
        args = parser.parse_args()
        print("The optimal individual information is as follows, and the accuracy has not been updated yet：")
        print(Best_individual)
        print("Starting from scratch and using test sets to verify accuracy")
        encode_information = Best_individual['encode_layers']
        End_out_channels = Best_individual['outchannels']
        size = Best_individual['size']
        net = GlobalNet(encode_information, End_out_channels, size, self.nBand, self.num_class).to(self.device)
        if not os.path.exists(args.outf):
            os.makedirs(args.outf)

        with open("Best_individual_train_log.txt", "a")as f2:
            print("Start Training, Globalnet!")
            f2.write("Start Training, Globalnet!" + '\n')
            f2.write("Starting from scratch and using test sets to verify accuracy" + '\n')
            optimizer = torch.optim.Adam(net.parameters(), lr=self.lr,
                                         weight_decay=self.weight_decay)  # TODO: lr and weight_decay are attributes already
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, self.EPOCH // 5, 0.5)
            # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, self.EPOCH // 5, 0.5)
            # RandPerm = np.random.permutation(nSample)  # (unused) random ordering of all samples
            # Best (lowest) validation loss observed in the final 20% of
            # epochs; the matching weights are checkpointed for the test.
            min_val_obj = 100

            for epoch in range(self.pre_epoch, self.EPOCH):

                train_dataset = Matcifar(datasets=self.imdb_train)  # 32 32 520
                valid_dataset = Matcifar(datasets=self.imdb_valid)  # 32 32 520

                train_queue = torch.utils.data.DataLoader(train_dataset, batch_size=self.BATCH_SIZE,
                                                          shuffle=True, num_workers=0)
                valid_queue = torch.utils.data.DataLoader(valid_dataset, batch_size=self.BATCH_SIZE,
                                                          shuffle=True, num_workers=0)
                tic = time.time()
                top1_avg_train, objs_avg_train, tar_train, pre_train = self.train(train_queue, net, optimizer)

                top1_avg_valid, objs_avg_valid, tar_valid, pre_valid = self.infer(valid_queue, net)
                toc = time.time()
                scheduler.step()
                print("[Epoch: %d]|train_acc:%f, train_loss:%f, valid_acc:%f, valid_loss:%f, time = %f" %
                      (epoch + 1, top1_avg_train, objs_avg_train, top1_avg_valid, objs_avg_valid, toc - tic))
                f2.write("[Epoch: %d]|train_acc:%f, train_loss:%f, valid_acc:%f, valid_loss:%f, time = %f" %
                         (epoch + 1, top1_avg_train, objs_avg_train, top1_avg_valid, objs_avg_valid, toc - tic) + '\n')
                # Only start tracking the checkpoint in the last 20% of epochs.
                if epoch > self.EPOCH * 0.8 and objs_avg_valid < min_val_obj:
                    min_val_obj = objs_avg_valid
                    utils.save(net, './Best_individual._model/Best_net.pth')
                # test
                if epoch == (self.EPOCH - 1):
                    print("Waiting Test!" + '\n')
                    f2.write("Waiting Test!" + '\n')
                    # NOTE(review): if no late epoch had validation loss below
                    # the initial min_val_obj (100), Best_net.pth was never
                    # saved this run and this load uses a stale/missing file —
                    # confirm loss ranges and EPOCH make this impossible.
                    utils.load(net, './Best_individual._model/Best_net.pth')
                    labels_test = np.array([], dtype=np.int64)
                    predict_test = np.array([], dtype=np.int64)

                    test_dataset = Matcifar(datasets=self.imdb_test)
                    test_queue = torch.utils.data.DataLoader(test_dataset, batch_size=self.BATCH_SIZE, shuffle=False,
                                                             num_workers=0)  # batch_size = 50

                    top1_test, objs_test, tar_test, pre_test = self.infer(test_queue, net)

                    labels_test = np.append(labels_test, tar_test)
                    predict_test = np.append(predict_test, pre_test)

                    print('The Best individual |test_datasets | top1.avg:%f, objs.avg:%f' % (top1_test, objs_test))
                    # Append each test result to the log file as it is produced.
                    f2.write(
                        'The Best individual |test_datasets | top1.avg:%f, objs.avg:%f' % (top1_test, objs_test) + '\n')

                    OA_ = metrics.accuracy_score(predict_test, labels_test)
                    Best_individual_acc = OA_
                    Best_individual['acc'] = float(Best_individual_acc)
                    matrix_ = metrics.confusion_matrix(predict_test, labels_test)
                    AA_, AA_mean_ = utils.AA_andEachClassAccuracy(matrix_)
                    Kappa_ = metrics.cohen_kappa_score(predict_test, labels_test)
                    # A_Test = sum(map(lambda x, y: 1 if x == y else 0, predict_test, labels_test)) / (index_ + 1)
                    # Best_individual_acc = OA_Test
                    # Best_individual['acc'] = float(Best_individual_acc)
                    # matrix_ = confusion_matrix(labels_test, predict_test)
                    # OA_, AA_mean_, Kappa_, AA_ = self.cal_results(matrix_)
                    print('OA：%f, AA_mean：%f, Kappa：%f ,Acuracy_per_class: %s' % (OA_, AA_mean_, Kappa_, AA_))
                    f2.write('OA：%f, AA_mean：%f, Kappa：%f ,Acuracy_per_class: %s' % (OA_, AA_mean_, Kappa_, AA_) + '\n')
                    print('The Best individual| param size = %fMB' % utils.count_parameters_in_MB(net) + '\n')
                    f2.write('The Best individual| param size = %fMB' % utils.count_parameters_in_MB(net) + '\n')
                    # print('Saving model......')
                    # f2.write('Saving model......' + '\n')
                    # utils.save(net, './Best_individual._model/Best_net.pth')
                    print("Test Finished, TotalEPOCH=%d" % self.EPOCH + '\n')
                    f2.write('\n' + "Test Finished, TotalEPOCH=%d" % self.EPOCH + '\n')
                    f2.flush()
        # NOTE(review): OA_/AA_mean_/Kappa_/AA_ are only bound inside the
        # last-epoch branch; if self.EPOCH <= self.pre_epoch this raises
        # NameError — confirm EPOCH is always > 0.
        with open("Best_individual_information.txt", "a") as f_2:
            f_2.write('The Best individual| OA：%f, AA_mean：%f, Kappa：%f ,Acuracy_per_class: %s' % (
                OA_, AA_mean_, Kappa_, AA_) + '\n')
            f_2.write('The Best individual| param size = %fMB' % utils.count_parameters_in_MB(net) + '\n')
            for key, value in Best_individual.items():
                if key == 'acc':
                    value = float(value)
                f_2.write(str(key) + ':')
                f_2.write(str(value))
                f_2.write('\n')
                f_2.flush()

    def train(self, train_queue, model, optimizer):
        """Run one training epoch over ``train_queue``.

        Args:
            train_queue: DataLoader yielding (image, label) batches.
            model: network to train; assumed already on ``self.device``.
            optimizer: optimizer stepping ``model``'s parameters.

        Returns:
            Tuple ``(top1_avg, loss_avg, targets, predictions)`` where
            ``targets`` and ``predictions`` are 1-D numpy arrays accumulated
            over the whole epoch.
        """
        objs = utils.AvgrageMeter()
        top1 = utils.AvgrageMeter()
        tar = np.array([])
        pre = np.array([])
        model.train()
        for image_, label_ in train_queue:
            # torch.autograd.Variable is a deprecated no-op wrapper since
            # PyTorch 0.4; plain tensors carry autograd state themselves.
            image_ = image_.to(self.device)
            label_ = label_.to(self.device)
            optimizer.zero_grad()
            n_ = image_.size(0)
            outputs_ = model(image_)
            loss_ = self.criterion(outputs_, label_)
            loss_.backward()
            # Clip gradients to stabilize training of deep candidates.
            nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
            optimizer.step()
            prec_, t_, p_ = utils.accuracy(outputs_, label_, topk=(1,))
            objs.update(loss_.item(), n_)
            top1.update(prec_[0].item(), n_)
            tar = np.append(tar, t_.data.cpu().numpy())
            pre = np.append(pre, p_.data.cpu().numpy())
        return top1.avg, objs.avg, tar, pre

    def infer(self, valid_queue, model):
        """Evaluate ``model`` on ``valid_queue`` without gradient tracking.

        Args:
            valid_queue: DataLoader yielding (image, label) batches.
            model: network to evaluate; assumed already on ``self.device``.

        Returns:
            Tuple ``(top1_avg, loss_avg, targets, predictions)`` where
            ``targets`` and ``predictions`` are 1-D numpy arrays accumulated
            over all batches.
        """
        objs = utils.AvgrageMeter()
        top1 = utils.AvgrageMeter()
        model.eval()
        tar = np.array([])
        pre = np.array([])
        with torch.no_grad():
            for image, label in valid_queue:
                # torch.autograd.Variable is a deprecated no-op wrapper since
                # PyTorch 0.4; plain tensors suffice.
                image = image.to(self.device)
                label = label.to(self.device)
                n = image.size(0)
                outputs = model(image)
                loss = self.criterion(outputs, label)
                prec1_, t_, p_ = utils.accuracy(outputs, label, topk=(1,))
                objs.update(loss.item(), n)
                top1.update(prec1_[0].item(), n)
                tar = np.append(tar, t_.data.cpu().numpy())
                pre = np.append(pre, p_.data.cpu().numpy())

        return top1.avg, objs.avg, tar, pre

    # def cal_results(self , matrix):
    #     shape = np.shape(matrix)
    #     number = 0
    #     sum = 0
    #     AA = np.zeros([shape[0]], dtype=np.float) #AA[0] = 0,
    #     for i in range(shape[0]):
    #         number += matrix[i, i] #number=0,672,
    #         if np.sum(matrix[i, :]) == 0:
    #             AA[i]= 0
    #         elif np.sum(matrix[i, :]) != 0:
    #             AA[i] = matrix[i, i] / np.sum(matrix[i, :])
    #         sum += np.sum(matrix[i, :]) * np.sum(matrix[:, i])#sum = 0
    #     OA = number / np.sum(matrix)
    #     AA_mean = np.mean(AA)
    #     pe = sum / (np.sum(matrix) ** 2)
    #     Kappa = (OA - pe) / (1 - pe)
    #     return OA, AA_mean, Kappa, AA

    def init_file(self):
        f1 = open("Individuals_train_log.txt", "w")
        f1.seek(0)
        f1.truncate()
        f1.write('All individual training is as follows：' + '\n')
        f1.close()
        f2 = open("Best_individual_train_log.txt", "w")
        f2.seek(0)
        f2.truncate()
        f2.write('The optimal individual training is as follows：' + '\n')
        f2.close()

        f_0 = open("superior_individual_information.txt", "w")
        f_0.seek(0)
        f_0.truncate()
        f_0.write('Better individuals over the ages keep updated information：' + '\n')
        f_0.close()

        f_2 = open("Best_individual_information.txt", "w")
        f_2.seek(0)
        f_2.truncate()
        f_2.write('The optimal individual information is as follows：' + '\n')
        f_2.close()
