import math

import numpy as np
from numpy import random
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score

from app.utils import ArrayUtil


class BPMethod:
    """Three-layer (input / hidden / output) back-propagation neural network.

    Trained with plain gradient descent on min-max normalized data.
    Intended call order: ``load_data`` -> ``fit`` -> ``predict`` -> ``get_error``.
    """

    def __init__(self, h_layer=9, max_epochs=50000, learning_rate=0.05, error=0.001):
        """Store hyper-parameters and prepare empty model state.

        :param h_layer: number of hidden-layer neurons
        :param max_epochs: maximum number of training iterations
        :param learning_rate: gradient-descent step size
        :param error: SSE threshold for early stopping in fit()
        """
        # hyper-parameters
        # BUG FIX: h_layer used to be reset to 0 further down in __init__,
        # which made fit() allocate empty (0 x i_layer) weight matrices.
        self.h_layer = h_layer
        self.max_epochs = max_epochs
        self.learning_rate = learning_rate
        self.error = error

        # global min/max of the raw data, set by load_data()
        self.min_value = None
        self.max_value = None

        # data-set geometry, set by load_data()
        self.row = 0
        self.col = 0
        self.train_num = 0
        self.test_num = 0

        # layer sizes: input = feature count, output = 1 target column
        self.i_layer = 0
        self.o_layer = 0

        # trained weights/biases, set by fit()
        self.Wih = None
        self.Woh = None
        self.Bih = None
        self.Boh = None

        # raw and scaler-normalized train/test splits (one COLUMN per sample)
        self.x_train = []
        self.y_train = []
        self.x_test = []
        self.y_test = []
        self.preds = []
        self.scaler = None
        self.x_norm_train = []
        self.y_norm_train = []
        self.x_norm_test = []
        self.y_norm_test = []

    # make normalize of matrix
    def __make_normalize_matrix(self, mat):
        """Min-max normalize ``mat`` to [0, 1] using the global data range."""
        return np.divide(mat - self.min_value, self.max_value - self.min_value)

    # make anti-normalize of matrix
    def __make_anti_normalize_matrix(self, mat):
        """Invert __make_normalize_matrix: map [0, 1] back to the raw range."""
        return mat * (self.max_value - self.min_value) + self.min_value

    # sigmoid function
    @staticmethod
    def sigmoid(x):
        """Logistic function 1 / (1 + e^-x); works on scalars and arrays."""
        return 1. / (1. + np.exp(-x))

    @staticmethod
    def sigmoid_mat(dta):
        """Element-wise sigmoid of a matrix.

        Vectorized with numpy instead of the previous per-cell Python loop;
        values are identical, and any array shape is now accepted.
        """
        return BPMethod.sigmoid(np.asarray(dta, dtype=float))

    def load_data(self, data, folds=0.8):
        """Split ``data`` into train/test parts and normalize both.

        :param data: 2-D array whose LAST column is the target value
        :param folds: fraction of rows used for training
        """
        # remember the global range for (anti-)normalization
        self.min_value = data.min()
        self.max_value = data.max()
        self.row, self.col = data.shape
        self.i_layer = self.col - 1  # every column but the last is a feature
        self.o_layer = 1

        # min-max normalize, then split row-wise; the .T puts one sample
        # per column, matching the W @ x layout used in fit()/predict()
        matrix = self.__make_normalize_matrix(data)
        size = math.floor(self.row * folds)
        self.train_num = size
        self.test_num = self.row - size
        self.x_train = matrix[0: size, 0: self.i_layer].T
        self.y_train = matrix[0: size, self.i_layer: self.i_layer + 1].T
        self.x_test = matrix[size: self.row, 0: self.i_layer].T
        self.y_test = matrix[size: self.row, self.i_layer: self.i_layer + 1].T

        # rescale everything to [-1, 1]
        # NOTE(review): each fit_transform below RE-FITS the same scaler, so
        # inverse_transform in predict() uses only the y_test fit; that is
        # consistent for the y side, but confirm the x side is intentional.
        self.scaler = MinMaxScaler(feature_range=(-1, 1))
        self.x_norm_train = self.scaler.fit_transform(self.x_train)
        self.y_norm_train = self.scaler.fit_transform(self.y_train)
        self.x_norm_test = self.scaler.fit_transform(self.x_test)
        self.y_norm_test = self.scaler.fit_transform(self.y_test)

    def fit(self):
        """Train weights by gradient descent until max_epochs or SSE < error."""
        # random initial parameters in [0, 1)
        Wih = random.random(size=(self.h_layer, self.i_layer))  # input -> hidden weights
        Woh = random.random(size=(self.o_layer, self.h_layer))  # hidden -> output weights
        Bih = random.random(size=(self.h_layer, 1))  # hidden-layer bias
        Boh = random.random(size=(self.o_layer, 1))  # output-layer bias
        error_history = []

        for _ in range(self.max_epochs):
            # forward pass: sigmoid hidden layer, linear output layer
            hidden_out = BPMethod.sigmoid_mat(
                Wih @ self.x_norm_train + np.tile(Bih, self.train_num))
            network_out = Woh @ hidden_out + np.tile(Boh, self.train_num)

            # early stop once the sum of squared errors is small enough
            error = self.y_norm_train - network_out
            SSE = ArrayUtil.sum_sqrare_array(error[0, :])
            error_history.append(SSE)
            if SSE < self.error:
                break

            # backward pass: delta_out is the (linear) output-layer delta,
            # delta_hid multiplies in the sigmoid derivative h * (1 - h)
            delta_out = error
            delta_hid = Woh.T @ delta_out * hidden_out * (1 - hidden_out)
            d_wih = delta_hid @ self.x_norm_train.T
            d_woh = delta_out @ hidden_out.T
            d_bih = delta_hid @ np.ones((self.train_num, 1))  # sum deltas over samples
            d_boh = delta_out @ np.ones((self.train_num, 1))

            # gradient step (deltas already carry the error sign)
            Wih += self.learning_rate * d_wih
            Woh += self.learning_rate * d_woh
            Bih += self.learning_rate * d_bih
            Boh += self.learning_rate * d_boh

        # keep the trained parameters for predict()
        self.Wih = Wih
        self.Woh = Woh
        self.Bih = Bih
        self.Boh = Boh

    def predict(self):
        """Run the trained network on the test split.

        :return: tuple ``(test, preds)``, both mapped back to the raw scale
        """
        # forward pass with the trained parameters
        matrix = self.Wih @ self.x_norm_test + np.tile(self.Bih, self.test_num)
        hidden_out = BPMethod.sigmoid_mat(matrix)
        network_out = self.Woh @ hidden_out + np.tile(self.Boh, self.test_num)

        # undo both normalization steps: scaler ([-1,1]) then min-max ([0,1])
        predict = self.scaler.inverse_transform(network_out)
        test = self.scaler.inverse_transform(self.y_norm_test)

        test = self.__make_anti_normalize_matrix(test)
        self.preds = self.__make_anti_normalize_matrix(predict)

        return test, self.preds

    # calculate MSE, MAE, R2
    def get_error(self):
        """Return MSE, MAE and R2 of the last prediction against y_test."""
        tests = self.__make_anti_normalize_matrix(self.y_test)
        tests = tests[0, :].tolist()
        preds = self.preds[0, :].tolist()
        MSE = mean_squared_error(tests, preds)
        MAE = mean_absolute_error(tests, preds)
        R2 = r2_score(tests, preds)

        return {'MSE': MSE, 'MAE': MAE, 'R2': R2}


if __name__ == "__main__":
    # Smoke check: build a default network and report its class name.
    network = BPMethod()
    print(type(network).__name__)
