
# Created by: Prof. Valdecy Pereira, D.Sc.
# UFF - Universidade Federal Fluminense (Brazil)
# email:  valdecy.pereira@gmail.com
# Course: Metaheuristics
# Lesson: Harris Hawks Optimization

# Citation:
# PEREIRA, V. (2021). Project: Metaheuristic-Grey_Wolf_Optimizer, File: Python-MH- Harris Hawks Optimization.py, GitHub repository: <https://github.com/Valdecy>

############################################################################

# Required Libraries
from accuracy import RMSE, MAE, MAPE, R_square, sMAPE
import time
import warnings
from forecast_models import *
import pandas as pd
from typing import Sequence
import numpy as np
import random
import os

from scipy.special import gamma

############################################################################

# Function


def target_function():
    """Placeholder objective function; returns None until a real one is supplied."""
    return None

############################################################################

# Function: Initialize Variables


def initial_position(hawks=5, min_values=[-5, -5], max_values=[5, 5], target_function=target_function):
    """Build the initial hawk population.

    Returns an array of shape (hawks, dim + 1): each row holds the decision
    variables sampled uniformly inside [min, max] per dimension, with the
    row's fitness value stored in the last column.
    """
    dim = len(min_values)
    position = np.zeros((hawks, dim + 1))
    for i in range(hawks):
        position[i, :dim] = [random.uniform(lo, hi)
                             for lo, hi in zip(min_values, max_values)]
        position[i, -1] = target_function(position[i, :dim])
    return position

# Function: Levy Distribution Vector


def levy_flight(dimensions, beta=1.5):
    """Draw a Levy-distributed step vector via Mantegna's algorithm.

    Args:
        dimensions: number of components in the returned vector.
        beta: stability index of the distribution (default 1.5).

    Returns:
        1-D numpy array of length `dimensions`. Entries are non-negative
        because r1 below is drawn from [0, 1] and sigma > 0.
    """
    # sigma depends only on beta, so compute it once; the original recomputed
    # it on every loop iteration and contained a no-op `beta = beta`.
    sig_num = gamma(1 + beta) * np.sin((np.pi * beta) / 2.0)
    sig_den = gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)
    sigma = (sig_num / sig_den) ** (1 / beta)
    levy = np.zeros(dimensions)
    for j in range(dimensions):
        # os.urandom-based uniforms in [0, 1], kept from the original so the
        # draws are independent of the `random` module's global seed.
        r1 = int.from_bytes(os.urandom(8), byteorder="big") / ((1 << 64) - 1)
        r2 = int.from_bytes(os.urandom(8), byteorder="big") / ((1 << 64) - 1)
        levy[j] = (0.01 * r1 * sigma) / (abs(r2) ** (1 / beta))
    return levy

# Function: Update Rabbit Position by Fitness


def update_rabbit_position(position_h, position_r):
    """Return the best solution seen so far (the 'rabbit').

    Compares the current rabbit's fitness (last column) against the best hawk
    in the population and returns a copy of the hawk row if it is strictly
    better; otherwise the incumbent rabbit is returned unchanged.
    """
    best_idx = np.argmin(position_h[:, -1])
    if position_h[best_idx, -1] < position_r[-1]:
        position_r = np.copy(position_h[best_idx, :])
    return position_r

# Function: Update Hawks Position


def _rand_01():
    """Uniform draw in [0, 1] built from os.urandom (seed-independent)."""
    return int.from_bytes(os.urandom(8), byteorder="big") / ((1 << 64) - 1)


def update_hawks_position(position_h, position_r, e_r_factor, min_values=[-5, -5], max_values=[5, 5], target_function=target_function):
    """Move every hawk for one HHO iteration.

    Args:
        position_h: (hawks, dim + 1) array; last column is the fitness.
        position_r: best solution found so far (the rabbit), same layout.
        e_r_factor: energy decay factor for this iteration (2 -> 0 over the run).
        min_values / max_values: per-dimension bounds.
        target_function: objective to minimize.

    Returns:
        position_h, updated in place.
    """
    for i in range(0, position_h.shape[0]):
        # Escaping energy E in [-e_r_factor, e_r_factor]: |E| >= 1 explores,
        # |E| < 1 exploits (besiege strategies).
        escaping_energy = e_r_factor * (2 * _rand_01() - 1)
        if abs(escaping_energy) >= 1:  # Exploration phase
            rand_1 = _rand_01()
            # BUG FIX: the random hawk must be drawn from the hawk rows
            # (shape[0]); the original used shape[1] (dimensions + 1), which
            # biases the choice and raises IndexError whenever
            # hawks < len(min_values) + 1.
            idx = random.choice(list(range(0, position_h.shape[0])))
            hawk = position_h[idx, :]
            if (rand_1 < 0.5):
                # Perch based on a random hawk's location.
                a = _rand_01()
                b = _rand_01()
                position_h[i, :-1] = hawk[:-1] - a * \
                    abs(hawk[:-1] - 2 * b * position_h[i, :-1])
            else:
                # Perch on a random point inside the search bounds.
                # NOTE(review): position_h[i, :-1].mean(0) is the scalar mean
                # of this hawk's OWN coordinates; the HHO paper uses the mean
                # over all hawks (position_h[:, :-1].mean(0)) — confirm intent.
                c = _rand_01()
                d = _rand_01()
                position_h[i, :-1] = (position_r[:-1] - position_h[i, :-1].mean(0)) - c * (
                    np.asarray(max_values) - np.asarray(min_values)) * d + np.asarray(min_values)
            # NOTE(review): exploration moves are neither clipped to the
            # bounds nor re-evaluated (the fitness column keeps its previous
            # value until a besiege branch recomputes it) — kept as original.
        else:  # |E| < 1: exploitation (besiege) phase
            rand_2 = _rand_01()
            if (rand_2 >= 0.5 and abs(escaping_energy) < 0.5):  # Hard Besiege
                position_h[i, :-1] = (position_r[:-1]) - escaping_energy * \
                    abs(position_r[:-1] - position_h[i, :-1])
            if (rand_2 >= 0.5 and abs(escaping_energy) >= 0.5):  # Soft Besiege
                jump_strength = 2 * (1 - _rand_01())
                position_h[i, :-1] = (position_r[:-1] - position_h[i, :-1]) - escaping_energy * abs(
                    jump_strength * position_r[:-1] - position_h[i, :-1])
            if (rand_2 < 0.5 and abs(escaping_energy) >= 0.5):  # Soft Besiege with progressive rapid dives
                jump_strength = 2 * (1 - _rand_01())
                x1 = position_r[:-1] - escaping_energy * \
                    abs(jump_strength * position_r[:-1] - position_h[i, :-1])
                for j in range(0, len(min_values)):
                    x1[j] = np.clip(x1[j], min_values[j], max_values[j])
                t_x1 = target_function(x1)
                if (t_x1 < position_h[i, -1]):
                    position_h[i, :-1] = np.copy(x1)
                    position_h[i,  -1] = t_x1
                else:
                    # Second attempt: add a Levy-flight perturbation.
                    x2 = position_r[:-1] - escaping_energy * abs(jump_strength * position_r[:-1] - position_h[i, :-1]) + np.multiply(
                        np.random.randn(len(min_values)), levy_flight(len(min_values), 1.5))
                    for j in range(0, len(min_values)):
                        x2[j] = np.clip(x2[j], min_values[j], max_values[j])
                    t_x2 = target_function(x2)
                    if (t_x2 < position_h[i, -1]):
                        position_h[i, :-1] = np.copy(x2)
                        position_h[i,  -1] = t_x2
            if (rand_2 < 0.5 and abs(escaping_energy) < 0.5):  # Hard Besiege with progressive rapid dives
                jump_strength = 2 * (1 - _rand_01())
                # NOTE(review): same scalar-mean quirk as the exploration
                # branch above (mean over this hawk's own coordinates).
                x1 = position_r[:-1] - escaping_energy * \
                    abs(jump_strength *
                        position_r[:-1] - position_h[i, :-1].mean(0))
                for j in range(0, len(min_values)):
                    x1[j] = np.clip(x1[j], min_values[j], max_values[j])
                t_x1 = target_function(x1)
                if (t_x1 < position_h[i, -1]):
                    position_h[i, :-1] = np.copy(x1)
                    position_h[i,  -1] = t_x1
                else:
                    x2 = position_r[:-1] - escaping_energy * abs(jump_strength * position_r[:-1] - position_h[i, :-1].mean(
                        0)) + np.multiply(np.random.randn(len(min_values)), levy_flight(len(min_values), 1.5))
                    for j in range(0, len(min_values)):
                        x2[j] = np.clip(x2[j], min_values[j], max_values[j])
                    t_x2 = target_function(x2)
                    if (t_x2 < position_h[i, -1]):
                        position_h[i, :-1] = np.copy(x2)
                        position_h[i,  -1] = t_x2
    return position_h

# HHO Function


def HHO(hawks=5, min_values=[-5, -5], max_values=[5, 5], iterations=50, target_function=target_function):
    """Harris Hawks Optimization main loop.

    Returns the best solution found: the decision variables followed by
    their fitness value in the last position.
    """
    position_h = initial_position(hawks=hawks, min_values=min_values,
                                  max_values=max_values, target_function=target_function)
    position_r = np.copy(position_h[0, :])
    # Inclusive upper bound reproduces the original `while count <= iterations`
    # loop: iterations + 1 passes in total.
    for count in range(0, iterations + 1):
        print('Iteration = ', count,  ' f(x) = ', position_r[-1])
        position_r = update_rabbit_position(position_h, position_r)
        # Energy factor decays linearly from 2 down to 0 over the run.
        e_r_factor = 2 * (1 - (count / iterations))
        position_h = update_hawks_position(position_h, position_r, e_r_factor,
                                           min_values, max_values, target_function)
    return position_r


def hho_lstm_model(df, sequence_length=4, horizon=1, column='Power', epochs=25):
    """Tune a 2-layer LSTM's unit counts with HHO and return test predictions.

    Args:
        df: DataFrame holding the series to forecast.
        sequence_length: input window length fed to the LSTM.
        horizon: forecast horizon passed to the data splitter.
        column: name of the target column in `df`.
        epochs: accepted for backward compatibility but ignored — both the
            search objective and the final fit hard-code 27 epochs.

    Returns:
        (y_test_rel, y_test_pre): actual and predicted test values, both
        inverse-transformed back to the original scale.
    """
    import pandas as pd
    import numpy as np
    from keras.layers import LSTM, Dense
    from keras.models import Sequential
    from read_data import splitdata, standata, iverse_data
    from accuracy import RMSE, MAE, MAPE, sMAPE
    from my_huber_loss import my_huber_loss_withthreshold

    data_lstm = df[column]
    x_train_h, x_test_h, y_train_h, y_test_h = splitdata(
        data_lstm, sequence_length, horizon)
    x_train1, x_test1, y_train1, y_test1 = standata(
        x_train_h, x_test_h, y_train_h, y_test_h)
    # Reshape to (samples, timesteps, features) as the LSTM layer expects.
    amount_of_features = 1
    x_train1 = np.reshape(
        x_train1, (x_train1.shape[0], x_train1.shape[1], amount_of_features))
    x_test1 = np.reshape(
        x_test1, (x_test1.shape[0], x_test1.shape[1], amount_of_features))

    # Search space: the unit counts of the two LSTM layers.
    min_values = [20, 20]
    max_values = [50, 50]
    hawks = 20
    iterations = 10
    # One threshold shared by the search objective AND the final fit.
    huber_threshold = 0.133

    def function(variables_values=[32, 16]):
        """HHO objective: train a candidate LSTM and return its test MAE."""
        print(variables_values)
        model = Sequential()
        model.add(LSTM(int(round(variables_values[0])), input_shape=(
            int(x_train1.shape[1]), 1), return_sequences=True))
        model.add(LSTM(int(round(variables_values[1]))))
        model.add(Dense(1))
        model.compile(loss=my_huber_loss_withthreshold(huber_threshold),
                      optimizer='adam')
        model.fit(x_train1, y_train1, epochs=27,
                  validation_split=0, verbose=0)
        result = model.predict(x_test1).reshape(-1, 1)
        test_real, result = iverse_data(y_train_h, result, y_test1)
        fuctions = RMSE(test_real, result)
        a = MAE(test_real, result)
        s = sMAPE(test_real, result)
        print('MAE:'+str(a)+'sMAE：'+str(s))
        if s < 21.6:
            # Persist promising candidates' predictions, keyed by their RMSE.
            print('rmse', fuctions, a, s)
            x = pd.DataFrame(result, columns=['HHO-LSTM'])
            x.to_csv(str(fuctions)[0:7]+'HHO-LSTM.csv')
        return a

    print('结束寻优**************')
    ans = HHO(hawks=hawks, min_values=min_values, max_values=max_values,
              iterations=iterations, target_function=function)
    print(ans)
    # Retrain the final model with the best unit counts found by HHO.
    # ans layout: [units_layer1, units_layer2, best_fitness].
    model = Sequential()
    model.add(LSTM(int(round(ans[0])), input_shape=(
        int(x_train1.shape[1]), 1), return_sequences=True))
    model.add(LSTM(int(round(ans[1]))))
    model.add(Dense(1))
    # BUG FIX: the original passed ans[2] — the best candidate's MAE that HHO
    # appends as the fitness column — as the Huber threshold. Use the same
    # threshold the search objective was evaluated with instead.
    model.compile(loss=my_huber_loss_withthreshold(huber_threshold),
                  optimizer='adam')
    model.fit(x_train1, y_train1, epochs=27,
              validation_split=0, verbose=0)
    pre1 = model.predict(x_test1)
    y_test_rel, y_test_pre = iverse_data(y_train_h, pre1, y_test1)
    return y_test_rel, y_test_pre


# ---- Script entry: load the series, run HHO-LSTM, report accuracy metrics ----
df = pd.read_csv('data.csv', index_col=0)
df = df.interpolate(method='linear')  # fill gaps before windowing
times = time.strftime('%Y-%m-%d-%H-%M', time.localtime(time.time()))
warnings.filterwarnings('ignore')
horizon = 1
sequence_length = 12
model_index2 = 'HHO-lstm'
y_test_rel, y_HHO_pre = hho_lstm_model(
    df, sequence_length=sequence_length, horizon=horizon)
# Accuracy metrics on the inverse-transformed test set.
mae_HHO = MAE(y_test_rel, y_HHO_pre)
rmse_HHO = RMSE(y_test_rel, y_HHO_pre)
mape_HHO = MAPE(y_test_rel, y_HHO_pre)
sMAPE_HHO = sMAPE(y_test_rel, y_HHO_pre)
HHO_R2 = R_square(y_test_rel, y_HHO_pre)
print(f'HHO的MAE为：{mae_HHO}')
print(f'HHO的RMSE为：{rmse_HHO}')
print(f'HHO的MAPE为：{mape_HHO}')
print(f'HHO的R2为：{HHO_R2}')
print(f'smape {sMAPE_HHO}')
print('******************')
print(y_HHO_pre)
# Persist the predictions for later comparison with other models.
x = pd.DataFrame(y_HHO_pre, columns=['HHO-LSTM'])
x.to_csv('HHO-LSTM-pre.csv')
