# -*- coding: utf-8 -*-
import os

from keras.backend import dropout
from keras.losses import MSE
from matplotlib.pyplot import autoscale
from numpy.lib.type_check import real
from scipy.sparse import data

import huber
from accuracy import MAE, MAPE, sMAPE, RMSE
from my_huber_loss import my_huber_loss_withthreshold
from read_data import *


def svr_model(df, sequence_length=4, horizon=1, index='Power'):
    """Train an RBF-kernel SVR on a sliding-window series and predict the test set.

    Parameters
    ----------
    df : pandas.DataFrame
        Source data; the column named by ``index`` is used as the series.
    sequence_length : int
        Number of past steps per input window (passed to ``splitdata``).
    horizon : int
        Forecast horizon (passed to ``splitdata``).
    index : str
        Column of ``df`` holding the target series.

    Returns
    -------
    tuple
        ``(y_test_rel, y_test_pre)`` — de-standardized test targets and
        predictions, the predictions reshaped to a column vector.
    """
    from read_data import splitdata, standata, iverse_data
    from sklearn.svm import SVR

    series = df[index]
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        series, sequence_length, horizon)
    x_train, x_test, y_train, y_test = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)

    # Hyperparameters are fixed; the previous GridSearchCV import was unused.
    svr = SVR(kernel='rbf', C=5, gamma=1)
    svr.fit(x_train, y_train.ravel())

    y_pre = svr.predict(x_test)
    # iverse_data undoes the standardization using the training-target stats.
    y_test_rel, y_test_pre = iverse_data(y_train_h, y_pre, y_test)
    y_test_pre = y_test_pre.reshape(-1, 1)
    return y_test_rel, y_test_pre


def bpnn_model(df, sequence_length=4, horizon=1, index='Power', epochs=50):
    """Train a simple feed-forward (BP) network forecaster and predict the test set.

    Parameters
    ----------
    df : pandas.DataFrame
        Source data; the column named by ``index`` is used as the series.
    sequence_length : int
        Number of past steps per input window (passed to ``splitdata``).
    horizon : int
        Forecast horizon (passed to ``splitdata``).
    index : str
        Column of ``df`` holding the target series.
    epochs : int
        Number of training epochs.

    Returns
    -------
    tuple
        ``(y_test_rel, y_test_pre)`` — de-standardized test targets and
        predictions from ``iverse_data``.
    """
    import numpy as np
    from keras.layers import Dense, Flatten
    from keras.models import Sequential
    from read_data import splitdata, standata, iverse_data

    series = df[index]
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        series, sequence_length, horizon)
    x_train1, x_test1, y_train1, y_test1 = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)

    # Reshape to (samples, timesteps, features) with a single feature per step.
    amount_of_features = 1
    x_train1 = np.reshape(
        x_train1, (x_train1.shape[0], x_train1.shape[1], amount_of_features))
    x_test1 = np.reshape(
        x_test1, (x_test1.shape[0], x_test1.shape[1], amount_of_features))

    model = Sequential()
    model.add(Dense(20, input_shape=(x_test1.shape[1], 1)))
    model.add(Flatten())
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(x_train1, y_train1, epochs=epochs,
              validation_split=0, verbose=0)

    pre1 = model.predict(x_test1)
    y_test_rel, y_test_pre = iverse_data(y_train_h, pre1, y_test1)
    return y_test_rel, y_test_pre


def elm_model(df, sequence_length=4, horizon=1, index='Power'):
    """Forecast with a Regularized Extreme Learning Machine (RELM).

    Parameters
    ----------
    df : pandas.DataFrame
        Source data; the column named by ``index`` is used as the series.
    sequence_length : int
        Number of past steps per input window (passed to ``splitdata``).
    horizon : int
        Forecast horizon (passed to ``splitdata``).
    index : str
        Column of ``df`` holding the target series.

    Returns
    -------
    tuple
        ``(y_test_rel, y_test_pre)`` — de-standardized test targets and
        predictions, the predictions reshaped to a column vector.
    """
    import numpy as np
    from read_data import splitdata, standata, iverse_data

    series = df[index]
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        series, sequence_length, horizon)
    x_train, x_test, y_train, y_test = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)

    class RELM_HiddenLayer:
        """Regularized extreme learning machine hidden layer.

        :param x: training inputs used to size the layer
        :param num: number of hidden nodes
        :param C: inverse of the regularization coefficient
        """

        def __init__(self, x, num, C=10):
            row = x.shape[0]
            columns = x.shape[1]
            rnd = np.random.RandomState()
            # Input weights, uniform in [-1, 1).
            self.w = rnd.uniform(-1, 1, (columns, num))
            # Bias: one random value per hidden node, repeated for every sample
            # (equivalent to the original per-element fill loop).
            self.b = np.tile(rnd.uniform(-0.4, 0.4, (1, num)), (row, 1))
            self.H0 = np.matrix(self.sigmoid(np.dot(x, self.w) + self.b))
            self.C = C
            # NOTE(review): len(x)/C is added element-wise here, not as a
            # scaled identity matrix as in the textbook RELM solution
            # (H'H + (n/C)·I)^-1 — confirm this deviation is intended.
            self.P = (self.H0.H * self.H0 + len(x) / self.C).I

        @staticmethod
        def sigmoid(x):
            """Sigmoid activation: 1 / (1 + exp(-x))."""
            return 1.0 / (1 + np.exp(-x))

        def regressor_train(self, T):
            """Solve for the output weights ``beta`` given training targets T.

            :param T: targets aligned with the inputs used in ``__init__``
            :return: hidden-to-output weight matrix ``beta``
            """
            all_m = np.dot(self.P, self.H0.H)
            self.beta = np.dot(all_m, T)
            return self.beta

        def regressor_test(self, test_x):
            """Predict targets for new inputs ``test_x``.

            :param test_x: feature matrix to predict for
            :return: predicted values
            """
            b_row = test_x.shape[0]
            # NOTE(review): reuses the first b_row rows of the training bias;
            # this breaks if the test set has more rows than the training set.
            h = self.sigmoid(np.dot(test_x, self.w) + self.b[:b_row, :])
            result = np.dot(h, self.beta)
            return result

    my_EML = RELM_HiddenLayer(x_train, 18)
    my_EML.regressor_train(y_train)

    y_pre = my_EML.regressor_test(x_test)

    y_test_rel, y_test_pre = iverse_data(y_train_h, y_pre, y_test)
    y_test_pre = y_test_pre.reshape(-1, 1)
    return y_test_rel, y_test_pre


def lstm_model_huber(df, sequence_length=4, horizon=1, index='Power', epochs=50):
    """Two-layer LSTM forecaster trained with a thresholded Huber loss.

    Parameters
    ----------
    df : pandas.DataFrame
        Source data; the column named by ``index`` is used as the series.
    sequence_length : int
        Number of past steps per input window (passed to ``splitdata``).
    horizon : int
        Forecast horizon (passed to ``splitdata``).
    index : str
        Column of ``df`` holding the target series.
    epochs : int
        Number of training epochs.

    Returns
    -------
    tuple
        ``(y_test_rel, y_test_pre)`` — de-standardized test targets and
        predictions from ``iverse_data``.
    """
    import numpy as np
    from keras.layers import LSTM, Dense
    from keras.models import Sequential
    from read_data import splitdata, standata, iverse_data

    series = df[index]
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        series, sequence_length, horizon)
    x_train1, x_test1, y_train1, y_test1 = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)

    # Reshape to (samples, timesteps, features) with a single feature per step.
    amount_of_features = 1
    x_train1 = np.reshape(
        x_train1, (x_train1.shape[0], x_train1.shape[1], amount_of_features))
    x_test1 = np.reshape(
        x_test1, (x_test1.shape[0], x_test1.shape[1], amount_of_features))

    model = Sequential()
    model.add(LSTM(71, input_shape=(
        int(x_train1.shape[1]), 1), return_sequences=True))
    model.add(LSTM(30))
    model.add(Dense(1))
    # Huber loss with threshold 0.5 is less sensitive to outliers than MSE.
    model.compile(loss=my_huber_loss_withthreshold(0.5), optimizer='adam')

    model.fit(x_train1, y_train1, epochs=epochs,
              validation_split=0, verbose=0)
    pre1 = model.predict(x_test1)
    y_test_rel, y_test_pre = iverse_data(y_train_h, pre1, y_test1)
    return y_test_rel, y_test_pre


def lstm_model(df, sequence_length=4, horizon=1, index='Power', epochs=50):
    """Two-layer LSTM forecaster trained with mean squared error.

    Parameters
    ----------
    df : pandas.DataFrame
        Source data; the column named by ``index`` is used as the series.
    sequence_length : int
        Number of past steps per input window (passed to ``splitdata``).
    horizon : int
        Forecast horizon (passed to ``splitdata``).
    index : str
        Column of ``df`` holding the target series.
    epochs : int
        Number of training epochs.

    Returns
    -------
    tuple
        ``(y_test_rel, y_test_pre)`` — de-standardized test targets and
        predictions from ``iverse_data``.
    """
    import numpy as np
    from keras.layers import LSTM, Dense
    from keras.models import Sequential
    from read_data import splitdata, standata, iverse_data

    series = df[index]
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        series, sequence_length, horizon)
    x_train1, x_test1, y_train1, y_test1 = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)

    # Reshape to (samples, timesteps, features) with a single feature per step.
    amount_of_features = 1
    x_train1 = np.reshape(
        x_train1, (x_train1.shape[0], x_train1.shape[1], amount_of_features))
    x_test1 = np.reshape(
        x_test1, (x_test1.shape[0], x_test1.shape[1], amount_of_features))

    model = Sequential()
    model.add(LSTM(32, input_shape=(
        int(x_train1.shape[1]), 1), return_sequences=True))
    model.add(LSTM(32))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')

    model.fit(x_train1, y_train1, epochs=epochs,
              validation_split=0, verbose=0)
    pre1 = model.predict(x_test1)
    y_test_rel, y_test_pre = iverse_data(y_train_h, pre1, y_test1)
    return y_test_rel, y_test_pre


def alo_lstm_model(df, sequence_length=4, horizon=1, column='Power', epochs=15):
    """Tune a two-layer LSTM's hidden sizes with the Ant Lion Optimizer (ALO),
    then train a final model with the best sizes and predict the test set.

    The ALO search minimizes test-set RMSE of a candidate LSTM (see the nested
    ``function``). Returns ``(y_test_rel, y_test_pre)``: de-standardized test
    targets and predictions from ``iverse_data``.
    """
    import pandas as pd
    import numpy as np
    from keras.layers import LSTM, Dense
    from keras.models import Sequential
    df = df
    #
    data_lstm = df[column]
    from read_data import splitdata, standata, iverse_data
    sequence_length = sequence_length
    horizon = horizon
    epochs = epochs
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        data_lstm, sequence_length, horizon)
    x_train1, x_test1, y_train1, y_test1 = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)
    # Reshape to (samples, timesteps, features) with a single feature per step.
    amount_of_features = 1
    x_train1 = np.reshape(
        x_train1, (x_train1.shape[0], x_train1.shape[1], amount_of_features))
    x_test1 = np.reshape(
        x_test1, (x_test1.shape[0], x_test1.shape[1], amount_of_features))
# %%
    from accuracy import RMSE, MAE, MAPE
    import random

    # Placeholder default objective; the real objective is passed in below.
    def target_function():

        return

    # Function: Initialize Variables
    # Each row is [x_0 ... x_{d-1}, fitness]; positions drawn uniformly in bounds.
    def initial_population(colony_size, min_values, max_values, target_function):
        population = np.zeros((colony_size, len(min_values) + 1))
        for i in range(0, colony_size):
            for j in range(0, len(min_values)):
                population[i, j] = random.uniform(min_values[j], max_values[j])
            population[i, -
                       1] = target_function(population[i, 0:population.shape[1]-1])
        return population

    # Function: Fitness
    # Converts raw objective values (last column) to normalized cumulative
    # selection probabilities (column 1) for roulette-wheel sampling.
    def fitness_function(population):
        fitness = np.zeros((population.shape[0], 2))
        for i in range(0, fitness.shape[0]):
            fitness[i, 0] = 1/(1 + population[i, -1] +
                               abs(population[:, -1].min()))
        fit_sum = fitness[:, 0].sum()
        fitness[0, 1] = fitness[0, 0]
        for i in range(1, fitness.shape[0]):
            fitness[i, 1] = (fitness[i, 0] + fitness[i-1, 1])
        for i in range(0, fitness.shape[0]):
            fitness[i, 1] = fitness[i, 1]/fit_sum
        return fitness

    # Function: Selection
    # Roulette-wheel pick of an antlion index by cumulative probability.
    # NOTE(review): the local name `random` shadows the `random` module here;
    # os.urandom yields a uniform float in [0, 1] from OS entropy.
    def roulette_wheel(fitness):
        ix = 0
        random = int.from_bytes(os.urandom(
            8), byteorder="big") / ((1 << 64) - 1)
        for i in range(0, fitness.shape[0]):
            if (random <= fitness[i, 1]):

                ix = i
                break
        return ix

    # Function: Random Walk
    # Cumulative ±1-style walk of length iterations+1 (steps are +2 or -1).
    def random_walk(iterations):
        x_random_walk = [0]*(iterations + 1)
        x_random_walk[0] = 0
        for k in range(1, len(x_random_walk)):
            rand = int.from_bytes(os.urandom(
                8), byteorder="big") / ((1 << 64) - 1)
            if rand > 0.5:
                rand = 1
            else:
                rand = 0
            x_random_walk[k] = x_random_walk[k-1] + (3*rand - 1)
        return x_random_walk

    # Function: Combine Ants
    # Merge ants and antlions, sort by objective value (ascending), keep the
    # best half as antlions and the rest as the new ant population.
    def combine(population, antlions):
        combination = np.vstack([population, antlions])
        combination = combination[combination[:, -1].argsort()]
        for i in range(0, population.shape[0]):
            for j in range(0, population.shape[1]):
                antlions[i, j] = combination[i, j]
                population[i, j] = combination[i + population.shape[0], j]
        return population, antlions

    # Function: Update Antlion
    # Moves each ant via two bounded random walks: one around a roulette-picked
    # antlion and one around the elite; i_ratio shrinks the walk bounds over time.
    def update_ants(population, antlions, count, iterations, min_values, max_values, target_function):
        i_ratio = 1
        minimum_c_i = np.zeros((1, population.shape[1]))
        maximum_d_i = np.zeros((1, population.shape[1]))
        minimum_c_e = np.zeros((1, population.shape[1]))
        maximum_d_e = np.zeros((1, population.shape[1]))
        elite_antlion = np.zeros((1, population.shape[1]))
        # NOTE(review): because `count > 0.20*iterations` is tested first, the
        # later elif branches (0.35/0.5/0.7/0.9) are unreachable — the standard
        # ALO checks these thresholds in descending order. Confirm intent.
        if (count > 0.20*iterations):
            w_exploration = 2
            i_ratio = (10**w_exploration)*(count/iterations)
        elif(count > 0.35*iterations):
            w_exploration = 3
            i_ratio = (10**w_exploration)*(count/iterations)
        elif(count > 0.5*iterations):
            w_exploration = 4
            i_ratio = (10**w_exploration)*(count/iterations)
        elif(count > 0.7*iterations):
            w_exploration = 5
            i_ratio = (10**w_exploration)*(count/iterations)
        elif(count > 0.9*iterations):
            w_exploration = 6
            i_ratio = (10**w_exploration)*(count/iterations)
        for i in range(0, population.shape[0]):
            fitness = fitness_function(antlions)
            ant_lion = roulette_wheel(fitness)
            for j in range(0, population.shape[1] - 1):
                # Walk bounds derived from the best/worst antlion, tightened by i_ratio.
                minimum_c_i[0, j] = antlions[antlions[:, -1].argsort()
                                             ][0, j]/i_ratio
                maximum_d_i[0, j] = antlions[antlions[:, -1].argsort()
                                             ][-1, j]/i_ratio
                elite_antlion[0, j] = antlions[antlions[:, -1].argsort()][0, j]
                minimum_c_e[0, j] = antlions[antlions[:, -1].argsort()
                                             ][0, j]/i_ratio
                maximum_d_e[0, j] = antlions[antlions[:, -1].argsort()
                                             ][-1, j]/i_ratio
                rand = int.from_bytes(os.urandom(
                    8), byteorder="big") / ((1 << 64) - 1)
                if (rand < 0.5):
                    minimum_c_i[0, j] = minimum_c_i[0, j] + \
                        antlions[ant_lion, j]
                    minimum_c_e[0, j] = minimum_c_e[0, j] + elite_antlion[0, j]
                else:
                    minimum_c_i[0, j] = - minimum_c_i[0, j] + \
                        antlions[ant_lion, j]
                    minimum_c_e[0, j] = - \
                        minimum_c_e[0, j] + elite_antlion[0, j]

                rand = int.from_bytes(os.urandom(
                    8), byteorder="big") / ((1 << 64) - 1)
                if (rand >= 0.5):
                    maximum_d_i[0, j] = maximum_d_i[0, j] + \
                        antlions[ant_lion, j]
                    maximum_d_e[0, j] = maximum_d_e[0, j] + elite_antlion[0, j]
                else:
                    maximum_d_i[0, j] = - maximum_d_i[0, j] + \
                        antlions[ant_lion, j]
                    maximum_d_e[0, j] = - \
                        maximum_d_e[0, j] + elite_antlion[0, j]
                x_random_walk = random_walk(iterations)
                e_random_walk = random_walk(iterations)
                min_x, max_x = min(x_random_walk), max(x_random_walk)
                # Min-max rescale the walk position at step `count` into [c, d].
                x_random_walk[count] = (((x_random_walk[count] - min_x)*(
                    maximum_d_i[0, j] - minimum_c_i[0, j]))/(max_x - min_x)) + minimum_c_i[0, j]
                min_e, max_e = min(e_random_walk), max(e_random_walk)
                e_random_walk[count] = (((e_random_walk[count] - min_e)*(
                    maximum_d_e[0, j] - minimum_c_e[0, j]))/(max_e - min_e)) + minimum_c_e[0, j]
                population[i, j] = np.clip(
                    (x_random_walk[count] + e_random_walk[count])/2, min_values[j], max_values[j])
            population[i, -
                       1] = target_function(population[i, 0:population.shape[1]-1])
            # NOTE(review): this return sits INSIDE the `for i` loop, so only
            # the first ant is ever updated per iteration — in reference ALO
            # implementations the return is outside the loop. Confirm intent.
            return population, antlions

    # ALO Function
    # Main loop: evaluate, update ants, merge, and track the elite antlion.
    # `fitns` is a list defined in the enclosing scope (before the call) that
    # records the elite fitness per iteration.
    def ant_lion_optimizer(colony_size, min_values, max_values, iterations, target_function):
        count = 0
        population = initial_population(
            colony_size=colony_size, min_values=min_values, max_values=max_values, target_function=target_function)
        antlions = initial_population(colony_size=colony_size, min_values=min_values,
                                      max_values=max_values, target_function=target_function)
        elite = np.copy(antlions[antlions[:, -1].argsort()][0, :])
        while (count <= iterations):
            print("----------------------------split----------------------------")
            print("Iteration = ", count, " fitness = ", elite[-1])
            fitns.append(elite[-1])
            population, antlions = update_ants(population, antlions, count=count, iterations=iterations,
                                               min_values=min_values, max_values=max_values, target_function=target_function)
            population, antlions = combine(population, antlions)
            value = np.copy(antlions[antlions[:, -1].argsort()][0, :])
            if(elite[-1] > value[-1]):
                elite = np.copy(value)
            else:
                # NOTE(review): fancy indexing creates a copy, so this
                # assignment does not actually write back into `antlions`.
                antlions[antlions[:, -1].argsort()][0, :] = np.copy(elite)
            count = count + 1
            print(elite)
        return elite

    # %%
    # Objective: train a candidate LSTM (hidden sizes = variables_values) for a
    # fixed 40 epochs and return the test RMSE. Side effect: dumps predictions
    # to CSV when sMAPE falls in (22, 23) — presumably a result-collection hack.
    def function(variables_values=[32, 16]):
        epochs = 40
        verbose = 0
        model = Sequential()
        model.add(LSTM(int(round(variables_values[0])), input_shape=(
            int(x_train1.shape[1]), 1), return_sequences=True))
        model.add(LSTM(int(round(variables_values[1]))))
        model.add(Dense(1))
        model.compile(loss=huber.huber_loss, optimizer='adam')
        model.fit(x_train1, y_train1, epochs=epochs,
                  validation_split=0, verbose=verbose)

        result = model.predict(x_test1)  # x_test
        result = result.reshape(-1, 1)
        real,result=iverse_data(y_train_h,result,y_test1)
        fuctions = RMSE(real, result)  # y_test
        a=MAE(real,result)
        s=sMAPE(real,result)
        print(fuctions)
        if s<23 and s>22 :
            print('rmse',fuctions,a,s)
            x=pd.DataFrame(result,columns=['ALO-LSTM'])
            x.to_csv(str(fuctions)[0:7]+'ALO-LSTM.csv')
        return fuctions

    fitns = []
    # Search hidden sizes in [10, 50] x [10, 50] with 5 antlions, 5 iterations.
    alo = ant_lion_optimizer(colony_size=5,
                             min_values=[10, 10], max_values=[50, 50],
                             iterations=5, target_function=function)

    # Retrain the final model with the best hidden sizes found by ALO.
    verbose = 0
    n_multi = 0
    model = Sequential()
    model.add(LSTM(int(round(alo[0])), input_shape=(
        int(x_train1.shape[1]), 1), return_sequences=True))
    model.add(LSTM(int(round(alo[1]))))
    model.add(Dense(1))
    model.compile(loss=huber.huber_loss, optimizer='adam')
    model.fit(x_train1, y_train1, epochs=epochs,
              validation_split=0, verbose=verbose)
    pre1 = model.predict(x_test1)
    y_test_rel, y_test_pre = iverse_data(y_train_h, pre1, y_test1)
    return y_test_rel, y_test_pre


def  pso_lstm_model(df, sequence_length=4, horizon=1, column='Power', epochs=40):
    """Tune a two-layer LSTM's hidden sizes with Particle Swarm Optimization,
    then train the final model with the best sizes and predict the test set.

    The PSO objective minimizes TRAINING-set RMSE of a 25-epoch candidate
    model. Returns ``(y_test_rel, y_test_pre)``: de-standardized test targets
    and predictions from ``iverse_data``.
    """
    import pandas as pd
    import numpy as np
    from keras.layers import LSTM, Dense
    from keras.models import Sequential
    df = df
    #
    data_lstm = df[column]
    from read_data import splitdata, standata, iverse_data
    sequence_length = sequence_length
    horizon = horizon
    epochs = epochs
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        data_lstm, sequence_length, horizon)
    x_train1, x_test1, y_train1, y_test1 = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)
    # Reshape to (samples, timesteps, features) with a single feature per step.
    amount_of_features = 1
    X_train = np.reshape(
        x_train1, (x_train1.shape[0], x_train1.shape[1], amount_of_features))
    x_test1 = np.reshape(
        x_test1, (x_test1.shape[0], x_test1.shape[1], amount_of_features))

    # % PSO-tuned LSTM
    class PSO():
        def __init__(self, pN, dim, max_iter):
            # NOTE(review): r1/r2 are fixed constants here; canonical PSO
            # redraws them uniformly at random each velocity update.
            self.w = 0.8
            self.c1 = 1.6
            self.c2 = 1.7
            self.r1 = 0.75
            self.r2 = 0.55
            self.pN = pN  # number of particles
            self.dim = dim  # search-space dimensionality
            self.max_iter = max_iter  # number of iterations
            self.X = np.zeros((self.pN, self.dim))  # particle positions and velocities
            self.V = np.zeros((self.pN, self.dim))
            self.pbest = np.zeros((self.pN, self.dim))  # per-particle best and global best positions
            self.gbest = np.zeros((1, self.dim))
            self.p_fit = np.zeros(self.pN)  # best fitness seen by each particle
            self.fit = 100
            self.aa = X_train
            self.a1 = y_train1
            self.aa1 = 0
            self.a21 = 0
            self.a2 = np.array(self.a21)

    # --------------------- objective function -----------------------------
        def function(self, x1, y1):
            # model = svm.SVC(kernel='rbf', C=x1,gamma=y1)
            # model.fit(self.aa, self.a1)
            # pred_test_y = model.predict(self.aa1)
            # score = sm.precision_score(self.a21,pred_test_y,average='weighted')

            # Train a candidate LSTM with hidden sizes (x1, y1) for 25 epochs
            # and score it by RMSE on the training data it was fit on.
            #sequence_length = 6
            model = Sequential()
            model.add(LSTM(int(x1), input_shape=(
                x_train1.shape[1], 1), return_sequences=True))
            # model.add(LSTM(120,return_sequences=True))
            model.add(LSTM(int(y1), return_sequences=False))
            model.add(Dense(1))
            model.compile(loss=huber.huber_loss, optimizer='adam')
            model.fit(self.aa, self.a1, epochs=25, verbose=0)
            pre = model.predict(self.aa)
            pre = pre.reshape(-1, 1)
            rmse = RMSE(self.a1, pre)
            print(rmse)
            return rmse
    # --------------------- initialize the swarm ----------------------------------

        def init_Population(self):
            # Random integer start positions in [10, 50], velocities in [2, 5];
            # `random` is imported in the enclosing function scope below.
            for i in range(self.pN):
                self.X[i][0] = random.randint(10, 50)
                self.X[i][1] = random.randint(10, 50)

                self.V[i][0] = random.randint(2, 5)
                self.V[i][1] = random.randint(2, 5)

                self.pbest[i] = self.X[i]
                tmp = self.function(self.X[i][0], self.X[i][1])
                self.p_fit[i] = tmp
                if(tmp < self.fit):
                    self.fit = tmp
                    # NOTE(review): this binds gbest to a VIEW of row X[i], so
                    # later in-place writes to X[i] silently move gbest too —
                    # a copy (self.X[i].copy()) is the usual fix. Confirm.
                    self.gbest = self.X[i]

    # ---------------------- update particle positions ----------------------------------
        def iterator(self):
            fitness = []
            for t in range(self.max_iter):
                print("-------------------------------------------------------------split-------------------------------------------------------------")
                print("迭代次数：", t)
                for i in range(self.pN):  # update gbest / pbest
                    temp = self.function(self.X[i][0], self.X[i][1])
                    if(temp < self.p_fit[i]):  # update the particle's best
                        self.p_fit[i] = temp
                        self.pbest[i] = self.X[i]
                        if(self.p_fit[i] < self.fit):  # update the global best
                            self.gbest = self.X[i]
                            self.fit = self.p_fit[i]
                for i in range(self.pN):
                    # Standard velocity update: inertia + cognitive + social terms.
                    V = self.w*self.V[i] + self.c1*self.r1*(
                        self.pbest[i] - self.X[i])+self.c2*self.r2*(self.gbest - self.X[i])
                    X = self.X[i] + V
                    # print(V)
                    # Accept the move only while it stays inside the box bounds.
                    if 0 < X[0] < 300 and 0 < X[1] < 50:
                        self.V[i] = V
                        self.X[i] = X
                        # print("i:",i)
                # print("v:",self.V)
                fitness.append(self.fit)
                print("适应度:", self.fit)  # best fitness so far
                print("最优解:", self.gbest)
            return fitness, self.gbest

    import random
    import warnings
    warnings.filterwarnings("ignore")
    from keras.layers.recurrent import LSTM
    from keras.layers import Dense, Activation, LSTM
    from keras import losses
    # Run PSO (10 particles, 5 iterations) and keep the best hidden sizes.
    my_pso = PSO(pN=10, dim=2, max_iter=5)
    my_pso.init_Population()
    fitness, para = my_pso.iterator()
    # Retrain the final model with the PSO-selected hidden sizes.
    verbose = 0
    n_multi = 0
    model = Sequential()
    model.add(LSTM(int(round(para[0])), input_shape=(
        int(x_train1.shape[1]), 1), return_sequences=True))
    model.add(LSTM(int(round(para[1]))))
    model.add(Dense(1))
    model.compile(loss=huber.huber_loss, optimizer='adam')
    model.fit(X_train, y_train1, epochs=epochs,
              validation_split=0, verbose=verbose)
    pre1 = model.predict(x_test1)
    y_test_rel, y_test_pre = iverse_data(y_train_h, pre1, y_test1)
    return y_test_rel, y_test_pre


def ga_lstm_model(df, sequence_length=4, horizon=1, column='Power', epochs=50):
    """Tune a two-layer LSTM's hidden sizes with a genetic algorithm (GA),
    then train the final model with the best sizes and predict the test set.

    Returns ``(y_test_rel, y_test_pre)``: de-standardized test targets and
    predictions from ``iverse_data``.
    """
    import pandas as pd
    import numpy as np
    from keras.layers import LSTM, Dense
    from keras.models import Sequential
    df = df
    #
    data_lstm = df[column]
    from read_data import splitdata, standata, iverse_data
    sequence_length = sequence_length
    horizon = horizon
    epochs = epochs
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        data_lstm, sequence_length, horizon)
    x_train1, x_test1, y_train1, y_test1 = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)
    # Reshape to (samples, timesteps, features) with a single feature per step.
    amount_of_features = 1
    x_train1 = np.reshape(
        x_train1, (x_train1.shape[0], x_train1.shape[1], amount_of_features))
    x_test1 = np.reshape(
        x_test1, (x_test1.shape[0], x_test1.shape[1], amount_of_features))

    # %% GA run
    # NOTE(review): these sklearn imports are only used by the commented-out
    # RandomForest fitness variant inside GA.evaluate.
    from sklearn.metrics import accuracy_score
    from sklearn.ensemble import RandomForestClassifier

    class Gene:
        """
        This is a class to represent individual(Gene) in GA algorithom
        each object of this class have two attribute: data, size
        """

        def __init__(self, **data):
            self.__dict__.update(data)
            self.size = len(data['data'])  # length of gene

    class GA:
        """
        This is a class of GA algorithm.
        """

        def __init__(self, parameter):
            """
            Initialize the pop of GA algorithom and evaluate the pop by computing its' fitness value.
            The data structure of pop is composed of several individuals which has the form like that:

            {'Gene':a object of class Gene, 'fitness': 1.02(for example)}
            Representation of Gene is a list: [b s0 u0 sita0 s1 u1 sita1 s2 u2 sita2]

            """
            # parameter = [CXPB, MUTPB, NGEN, popsize, low, up]
            self.parameter = parameter

            low = self.parameter[4]
            up = self.parameter[5]

            # bound[0] holds lower bounds, bound[1] holds upper bounds.
            self.bound = []
            self.bound.append(low)
            self.bound.append(up)

            pop = []
            for i in range(self.parameter[3]):
                geneinfo = []
                for pos in range(len(low)):
                    # initialise popluation
                    geneinfo.append(random.randint(
                        self.bound[0][pos], self.bound[1][pos]))

                fitness = self.evaluate(geneinfo)  # evaluate each chromosome
                # store the chromosome and its fitness
                pop.append({'Gene': Gene(data=geneinfo), 'fitness': fitness})

            self.pop = pop
            # store the best chromosome in the population
            self.bestindividual = self.selectBest(self.pop)

        def evaluate(self, geneinfo):
            """
            fitness function

            Trains a candidate LSTM with hidden sizes geneinfo[0]/geneinfo[1]
            for 20 epochs and returns the test-set RMSE. Side effect: dumps
            predictions to CSV when sMAPE falls in (22, 23).
            NOTE(review): the returned value is an ERROR (lower is better),
            but selection/selectBest below treat larger fitness as better.
            """
            # clf = RandomForestClassifier(n_estimators= geneinfo[0], max_depth = geneinfo[1])
            # model = clf.fit(X_train, y_train)

            # result = model.predict(X_test)
            # y_pred = np.around(result)
            # acc = accuracy_score(y_test, y_pred)

            epochs = 20
            verbose = 0
            model = Sequential()
            model.add(LSTM(int(round(geneinfo[0])), input_shape=(
                int(x_train1.shape[1]), 1), return_sequences=True))
            model.add(LSTM(int(round(geneinfo[1]))))
            model.add(Dense(1))
            model.compile(loss=huber.huber_loss, optimizer='adam')
            # print(x_train1.shape)
            model.fit(x_train1, y_train1, epochs=epochs,
                      validation_split=0, verbose=verbose)

            from accuracy import RMSE
            result = model.predict(x_test1)  # x_test
            result = result.reshape(-1, 1)
            real,result=iverse_data(y_train_h,result,y_test1)
            fuctions = RMSE(real, result)  # y_test
            a=MAE(real,result)
            s=sMAPE(real,result)
            if s>22 and s<23:
                print('rmse',fuctions,a,s)
                x=pd.DataFrame(result,columns=['GA-LSTM'])
                x.to_csv(str(fuctions)[0:7]+'GA-LSTM.csv')
            return fuctions

        def selectBest(self, pop):
            """
            select the best individual from pop

            NOTE(review): reverse=True picks the LARGEST fitness, but fitness
            here is RMSE (to be minimized) — this selects the worst model.
            Confirm whether reverse=False was intended.
            """
            s_inds = sorted(pop, key=itemgetter(
                "fitness"), reverse=True)          # from large to small, return a pop
            return s_inds[0]

        def selection(self, individuals, k):
            """
            select some good individuals from pop, note that good individuals have greater probability to be choosen
            for example: a fitness list like that:[5, 4, 3, 2, 1], sum is 15,
            [-----|----|---|--|-]
            012345|6789|101112|1314|15
            we randomly choose a value in [0, 15],
            it belongs to first scale with greatest probability
            """
            s_inds = sorted(individuals, key=itemgetter("fitness"),
                            reverse=True)  # sort the pop by the reference of fitness
            # sum up the fitness of the whole pop
            sum_fits = sum(ind['fitness'] for ind in individuals)

            chosen = []
            for i in range(k):
                # randomly produce a num in the range of [0, sum_fits], as threshold
                u = random.random() * sum_fits
                sum_ = 0
                for ind in s_inds:
                    sum_ += ind['fitness']  # sum up the fitness
                    if sum_ >= u:
                        # when the sum of fitness is bigger than u, choose the one, which means u is in the range of
                        # [sum(1,2,...,n-1),sum(1,2,...,n)] and is time to choose the one ,namely n-th individual in the pop
                        chosen.append(ind)
                        break
            # from small to large, due to list.pop() method get the last element
            chosen = sorted(chosen, key=itemgetter("fitness"), reverse=False)
            return chosen

        def crossoperate(self, offspring):
            """
            cross operation
            here we use two points crossoperate
            for example: gene1: [5, 2, 4, 7], gene2: [3, 6, 9, 2], if pos1=1, pos2=2
            5 | 2 | 4  7
            3 | 6 | 9  2
            =
            3 | 2 | 9  2
            5 | 6 | 4  7
            """
            dim = len(offspring[0]['Gene'].data)

            # Gene's data of first offspring chosen from the selected pop
            geninfo1 = offspring[0]['Gene'].data
            # Gene's data of second offspring chosen from the selected pop
            geninfo2 = offspring[1]['Gene'].data

            if dim == 1:
                pos1 = 1
                pos2 = 1
            else:
                # select a position in the range from 0 to dim-1,
                pos1 = random.randrange(1, dim)
                pos2 = random.randrange(1, dim)

            newoff1 = Gene(data=[])  # offspring1 produced by cross operation
            newoff2 = Gene(data=[])  # offspring2 produced by cross operation
            temp1 = []
            temp2 = []
            for i in range(dim):
                if min(pos1, pos2) <= i < max(pos1, pos2):
                    # inside the crossover window: keep each parent's own genes
                    temp2.append(geninfo2[i])
                    temp1.append(geninfo1[i])
                else:
                    # outside the window: swap genes between parents
                    temp2.append(geninfo1[i])
                    temp1.append(geninfo2[i])
            newoff1.data = temp1
            newoff2.data = temp2

            return newoff1, newoff2

        def mutation(self, crossoff, bound):
            """
            mutation operation

            Re-draws one randomly chosen gene uniformly within its bounds.
            """
            dim = len(crossoff.data)

            if dim == 1:
                pos = 0
            else:
                # chose a position in crossoff to perform mutation.
                pos = random.randrange(0, dim)

            crossoff.data[pos] = random.randint(bound[0][pos], bound[1][pos])
            return crossoff

        def GA_main(self):
            """
            main frame work of GA

            NGEN, CXPB and MUTPB are closed over from the enclosing function
            scope (defined just before GA is instantiated).
            """
            popsize = self.parameter[3]

            print("Start of evolution")

            # Begin the evolution
            for g in range(NGEN):

                print("############### Generation {} ###############".format(g))

                # Apply selection based on their converted fitness
                selectpop = self.selection(self.pop, popsize)

                nextoff = []
                while len(nextoff) != popsize:
                    # Apply crossover and mutation on the offspring

                    # Select two individuals
                    # NOTE(review): selection() can return fewer than popsize
                    # individuals; repeated pop() here could raise IndexError.
                    offspring = [selectpop.pop() for _ in range(2)]

                    if random.random() < CXPB:  # cross two individuals with probability CXPB
                        crossoff1, crossoff2 = self.crossoperate(offspring)
                        if random.random() < MUTPB:  # mutate an individual with probability MUTPB
                            muteoff1 = self.mutation(crossoff1, self.bound)
                            muteoff2 = self.mutation(crossoff2, self.bound)
                            # Evaluate the individuals
                            fit_muteoff1 = self.evaluate(muteoff1.data)
                            # Evaluate the individuals
                            fit_muteoff2 = self.evaluate(muteoff2.data)
                            nextoff.append(
                                {'Gene': muteoff1, 'fitness': fit_muteoff1})
                            nextoff.append(
                                {'Gene': muteoff2, 'fitness': fit_muteoff2})
                        else:
                            fit_crossoff1 = self.evaluate(
                                crossoff1.data)  # Evaluate the individuals
                            fit_crossoff2 = self.evaluate(crossoff2.data)
                            nextoff.append(
                                {'Gene': crossoff1, 'fitness': fit_crossoff1})
                            nextoff.append(
                                {'Gene': crossoff2, 'fitness': fit_crossoff2})
                    else:
                        nextoff.extend(offspring)

                # The population is entirely replaced by the offspring
                self.pop = nextoff

                # Gather all the fitnesses in one list and print the stats
                fits = [ind['fitness'] for ind in self.pop]

                best_ind = self.selectBest(self.pop)

                # NOTE(review): '>' keeps the LARGER RMSE as "best" — see the
                # minimization concern on selectBest above.
                if best_ind['fitness'] > self.bestindividual['fitness']:
                    self.bestindividual = best_ind

                print("Best individual found is {}, {}".format(self.bestindividual['Gene'].data,
                                                               self.bestindividual['fitness']))
                print("  Max fitness of current pop: {}".format(max(fits)))

            print("------ End of (successful) evolution ------")
            return self.bestindividual['Gene'].data

    import random
    import warnings
    warnings.filterwarnings("ignore")
    from keras.layers.recurrent import LSTM
    from keras.layers import Dense, Activation, LSTM
    from keras import losses

    from operator import itemgetter
    # crossover prob, mutation prob, number of generations, population size
    CXPB, MUTPB, NGEN, popsize = 0.8, 0.2, 5, 10
    up = [50, 50]  # upper range for variables
    low = [10, 10]  # lower range for variables
    parameter = [CXPB, MUTPB, NGEN, popsize, low, up]
    run = GA(parameter)
    para = run.GA_main()

    # Retrain the final model with the GA-selected hidden sizes.
    verbose = 0
    n_multi = 0
    model = Sequential()
    model.add(LSTM(int(round(para[0])), input_shape=(
        int(x_train1.shape[1]), 1), return_sequences=True))
    model.add(LSTM(int(round(para[1]))))
    model.add(Dense(1))
    model.compile(loss=huber.huber_loss, optimizer='adam')
    model.fit(x_train1, y_train1, epochs=epochs,
              validation_split=0, verbose=verbose)
    pre1 = model.predict(x_test1)
    y_test_rel, y_test_pre = iverse_data(y_train_h, pre1, y_test1)
    return y_test_rel, y_test_pre


def SSA(df, sequence_length=4, horizon=1, column='Power', epochs=25):
    """Forecast a time series with an LSTM whose hyper-parameters are tuned
    by the Salp Swarm Algorithm (SSA).

    SSA searches a 3-dimensional space: [units in LSTM layer 1,
    units in LSTM layer 2, Huber-loss threshold]. The best position found
    is then used to train the final forecasting model.

    Parameters
    ----------
    df : pandas.DataFrame
        Input data; the series ``df[column]`` is modelled.
    sequence_length : int
        Number of lagged observations used as model input.
    horizon : int
        Forecast horizon passed to ``splitdata``.
    column : str
        Name of the column in ``df`` to forecast.
    epochs : int
        Training epochs for the *final* model (each inner fitness
        evaluation uses a fixed 25 epochs).

    Returns
    -------
    (y_test_rel, y_test_pre)
        Actual and predicted test values on the original (de-standardized)
        scale, as produced by ``iverse_data``.
    """
    import math
    import os
    import random
    import warnings

    import numpy as np
    import pandas as pd
    from keras.layers import Dense, LSTM
    from keras.models import Sequential

    from accuracy import RMSE
    from read_data import splitdata, standata, iverse_data

    warnings.filterwarnings("ignore")

    # Build supervised (lags -> target) samples, then standardize them.
    data_lstm = df[column]
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        data_lstm, sequence_length, horizon)
    x_train1, x_test1, y_train1, y_test1 = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)

    # Reshape to the (samples, timesteps, features) layout LSTM expects;
    # this is a univariate series, so features == 1.
    amount_of_features = 1
    x_train1 = np.reshape(
        x_train1, (x_train1.shape[0], x_train1.shape[1], amount_of_features))
    x_test1 = np.reshape(
        x_test1, (x_test1.shape[0], x_test1.shape[1], amount_of_features))

    # --- Salp Swarm Algorithm (minimization) -------------------------------

    def target_function():
        """Placeholder default objective; a real one is always supplied."""
        return

    def initial_position(swarm_size, min_values, max_values,
                         target_function=target_function):
        """Draw a uniform random swarm; the last column stores each salp's
        fitness value."""
        position = np.zeros((swarm_size, len(min_values) + 1))
        for i in range(swarm_size):
            for j in range(len(min_values)):
                position[i, j] = random.uniform(min_values[j], max_values[j])
            position[i, -1] = target_function(position[i, :-1])
        return position

    def food_position(dimension, target_function=target_function):
        """Initialize the food source (best-known solution) at an arbitrary
        fixed point and evaluate it."""
        food = np.zeros((1, dimension + 1))
        for j in range(dimension):
            food[0, j] = 2
        food[0, -1] = target_function(food[0, :-1])
        return food

    def update_food(position, food):
        """Adopt any salp whose fitness beats the current food
        (lower fitness is better)."""
        for i in range(position.shape[0]):
            if food[0, -1] > position[i, -1]:
                food[0, :] = position[i, :]
        return food

    def update_position(position, food, c1, min_values, max_values,
                        target_function):
        """Move leader salps around the food source and follower salps
        toward their predecessor (the salp-chain behaviour)."""
        half = position.shape[0] / 2
        for i in range(position.shape[0]):
            if i <= half:
                # Leaders: explore around the food, step scaled by c1.
                for j in range(len(min_values)):
                    # Uniform randoms in [0, 1] from the OS entropy pool.
                    c2 = int.from_bytes(
                        os.urandom(8), byteorder="big") / ((1 << 64) - 1)
                    c3 = int.from_bytes(
                        os.urandom(8), byteorder="big") / ((1 << 64) - 1)
                    step = c1 * ((max_values[j] - min_values[j]) * c2
                                 + min_values[j])
                    candidate = food[0, j] + step if c3 >= 0.5 \
                        else food[0, j] - step
                    position[i, j] = np.clip(
                        candidate, min_values[j], max_values[j])
            else:
                # Followers: average with the preceding salp.
                for j in range(len(min_values)):
                    position[i, j] = np.clip(
                        (position[i - 1, j] + position[i, j]) / 2,
                        min_values[j], max_values[j])
            position[i, -1] = target_function(position[i, :-1])
        return position

    def salp_swarm_algorithm(swarm_size, min_values, max_values, iterations,
                             target_function=target_function):
        """Run SSA and return the food row: [best params..., best fitness]."""
        position = initial_position(swarm_size=swarm_size,
                                    min_values=min_values,
                                    max_values=max_values,
                                    target_function=target_function)
        food = food_position(dimension=len(min_values),
                             target_function=target_function)
        count = 0
        while count <= iterations:  # NOTE: runs iterations + 1 times
            print("Iteration = ", count, " fitness = ", food[0, -1])
            # c1 decays with time, shifting exploration -> exploitation.
            c1 = 2 * math.exp(-(4 * (count / iterations)) ** 2)
            food = update_food(position, food)
            position = update_position(position, food, c1=c1,
                                       min_values=min_values,
                                       max_values=max_values,
                                       target_function=target_function)
            count += 1
        print(food)
        return food

    def function(variables_values=(32, 16, 0.15)):
        """SSA fitness: train an LSTM with the candidate hyper-parameters
        and return its test RMSE (lower is better)."""
        inner_epochs = 25
        model = Sequential()
        model.add(LSTM(int(round(variables_values[0])), input_shape=(
            int(x_train1.shape[1]), 1), return_sequences=True))
        model.add(LSTM(int(round(variables_values[1]))))
        model.add(Dense(1))
        model.compile(loss=my_huber_loss_withthreshold(variables_values[2]),
                      optimizer='adam')
        model.fit(x_train1, y_train1, epochs=inner_epochs,
                  validation_split=0, verbose=0)

        result = model.predict(x_test1).reshape(-1, 1)
        real, y_pre = iverse_data(y_train_h, result, y_test1)
        # BUG FIX: the metrics previously compared the inverse-transformed
        # actuals (`real`) against the still-standardized predictions
        # (`result`), mixing scales. Compare on the original scale instead
        # (the CSV dump below already used `y_pre`, confirming the intent).
        fitness = RMSE(real, y_pre)
        s = sMAPE(real, y_pre)
        a = MAE(real, y_pre)
        if s < 21.6:  # empirically chosen threshold for saving good runs
            print('rmse', fitness, a, s)
            pd.DataFrame(y_pre, columns=['SSA-LSTM']).to_csv(
                str(fitness)[0:7] + 'SSA-LSTM.csv')
        return fitness

    ssa = salp_swarm_algorithm(swarm_size=10, min_values=[10, 10, 0],
                               max_values=[50, 50, 0.3], iterations=5,
                               target_function=function)
    para = ssa[0]  # [units layer 1, units layer 2, huber threshold, fitness]

    # Retrain the final model with the best hyper-parameters found.
    model = Sequential()
    model.add(LSTM(int(round(para[0])), input_shape=(
        int(x_train1.shape[1]), 1), return_sequences=True))
    model.add(LSTM(int(round(para[1]))))
    model.add(Dense(1))
    model.compile(loss=my_huber_loss_withthreshold(para[2]), optimizer='adam')
    model.fit(x_train1, y_train1, epochs=epochs,
              validation_split=0, verbose=0)
    pre1 = model.predict(x_test1)
    y_test_rel, y_test_pre = iverse_data(y_train_h, pre1, y_test1)
    return y_test_rel, y_test_pre
