from __future__ import division
import random
import math
import numpy as np


# Return the index of the maximum value in output
def get_fired_neuron(output):
    """Return the index of the largest value in *output* (first index on ties).

    Returns 0 for an empty sequence, matching the original behaviour.
    """
    # Renamed the tracking variable: the original shadowed the builtin `max`.
    best = 0
    for i, value in enumerate(output):
        if value > output[best]:
            best = i
    return best


class RBFNetwork:
    """Radial-basis-function network trained end-to-end by gradient descent.

    Centers are initialised at random and then refined -- together with the
    spread radii (sigma) and the hidden-to-output weights -- by per-pattern
    gradient descent.  Drawback (from the original author): very slow on
    large data sets (7000+ rows take hours).
    """

    # Kept for backward compatibility with any external readers; transform()
    # returns a fresh array and never touches this class attribute.
    Z = []

    def __init__(self, inputDim, hiddenNum, outputDim, X, Y, learning_rate=0.005):
        """
        :param inputDim:      dimensionality of a single input pattern
        :param hiddenNum:     number of hidden (RBF) units
        :param outputDim:     dimensionality of a single output pattern
        :param X:             training inputs, shape (n, inputDim)
        :param Y:             training targets; must be 2-D, shape (n, outputDim)
        :param learning_rate: gradient-descent step size
        """
        self.inputDim = inputDim
        self.hiddenNum = hiddenNum
        self.outputDim = outputDim
        self.X = X
        self.Y = Y
        self.x = np.zeros(self.inputDim)
        self.y = np.zeros(self.outputDim)
        self.centroid = np.zeros((self.hiddenNum, self.inputDim))
        self.sigma = np.zeros(self.hiddenNum)
        # Hidden-layer activations g for ONE pattern (not the full matrix G).
        self.hidden_output = np.zeros(self.hiddenNum)
        # Hidden-to-output weight matrix w, shape (hiddenNum, outputDim).
        self.hidden_to_output_weight = np.zeros((self.hiddenNum, self.outputDim))
        self.output_bias = np.zeros(self.outputDim)
        self.actual_target_values = []
        self.total = 0
        self.learning_rate = learning_rate
        self.setup_center()
        self.setup_sigma_spread_radius()
        self.set_up_hidden_to_output_weight()
        self.set_up_output_bias()
        self.error_of_output_layer = [0 for _ in range(self.outputDim)]
        # Running sum of half-squared errors across ALL gradient_descent
        # calls; it is never reset, so treat it as a cumulative diagnostic.
        self.mean_error = 0
        # Threshold used to binarise outputs in transform()/get_accuracy().
        self.cor_sta = 0.5

    def setup_center(self):
        """Initialise every centroid uniformly at random in [0, 1)^inputDim.

        TODO(review): the original comment says a clustering method should
        eventually replace this random initialisation.
        """
        for i in range(self.hiddenNum):
            self.centroid[i] = np.random.uniform(0, 1, self.inputDim)

    def setup_sigma_spread_radius(self):
        """Compute one spread radius (sigma) per centroid."""
        for i in range(self.hiddenNum):
            self.sigma[i] = self.set_up_sigma_for_center(self.centroid[i])

    def set_up_sigma_for_center(self, center):
        """Spread radius from the p nearest centroids, with p = hiddenNum / 3.

        The centroid itself is among the candidates; its zero distance adds
        nothing to the sum but occupies one of the p slots (kept from the
        original algorithm).
        """
        p = self.hiddenNum / 3
        distances = [self.euclidean_distance(center, c) for c in self.centroid]

        # Renamed from `sum`, which shadowed the builtin.
        squared_sum = 0.0
        for _ in range(int(p)):
            nearest = self.get_smallest_index(distances)
            distances[nearest] = float("inf")

            neighbour_centroid = self.centroid[nearest]
            for j in range(len(neighbour_centroid)):
                squared_sum += (center[j] - neighbour_centroid[j]) ** 2

        sigma = math.sqrt(squared_sum / p)
        # Guard against a zero radius (possible when int(p) <= 1, since the
        # nearest "neighbour" is the centre itself): sigma == 0 would cause
        # a division by zero in the Gaussian activation later on.
        return sigma if sigma > 0 else 1e-6

    @staticmethod
    def euclidean_distance(x, y):
        """Euclidean (L2) distance between two vectors."""
        return np.linalg.norm(x - y)

    @staticmethod
    def get_smallest_index(distances):
        """Index of the smallest element (first index on ties)."""
        min_index = 0
        for i in range(len(distances)):
            if distances[min_index] > distances[i]:
                min_index = i
        return min_index

    def set_up_hidden_to_output_weight(self):
        """Initialise hidden-to-output weights uniformly at random in [0, 1)."""
        self.hidden_to_output_weight = np.random.uniform(0, 1, (self.hiddenNum, self.outputDim))

        print("Hidden to output weight ", self.hidden_to_output_weight)

    def set_up_output_bias(self):
        """Initialise the output bias uniformly at random in [0, 1)."""
        self.output_bias = np.random.uniform(0, 1, self.outputDim)

    # Train an epoch and return total MSE
    def pass_one_epoch(self):
        """Train on every pattern exactly once, in random order.

        :return: mean squared error over the epoch.
        """
        all_error = 0
        all_index = list(range(len(self.X)))

        for _ in range(len(self.X)):
            # BUG FIX: the position drawn from all_index must be mapped back
            # to a pattern index.  The original code indexed X/Y with the
            # position directly, so early patterns were trained repeatedly
            # while later ones were skipped.  randrange also avoids the rare
            # out-of-range draw from int(random.uniform(0, 1) * len).
            position = random.randrange(len(all_index))
            pattern = all_index.pop(position)
            x = self.X[pattern]
            y = self.Y[pattern]

            self.y = y
            self.actual_target_values = y
            self.pass_input_to_network(x)

            all_error += self.get_error_for_pattern()
            self.gradient_descent(x)

        return all_error / len(self.X)

    def pass_input_to_network(self, x):
        """Full forward pass: input -> hidden activations -> normalised output."""
        g = self.pass_to_hidden_node(x)
        return self.pass_to_output_node(g)

    def pass_to_hidden_node(self, x):
        """Gaussian RBF activations: g_i = exp(-||x - c_i||^2 / (2 * sigma_i^2)).

        :return: hidden-layer output vector g (also stored on self).
        """
        self.hidden_output = np.zeros(self.hiddenNum)
        for i in range(self.hiddenNum):
            sq_distance = self.euclidean_distance(x, self.centroid[i]) ** 2
            self.hidden_output[i] = math.exp(-(sq_distance / (2 * self.sigma[i] * self.sigma[i])))
        return self.hidden_output

    def pass_to_output_node(self, hidden_output):
        """Map hidden activations g to the normalised output vector y.

        NOTE(review): output_bias is trained in gradient_descent but never
        added here -- presumably an omission; kept as-is so the trained
        behaviour is unchanged.  Confirm with the original author.
        """
        self.y = [0 for _ in range(self.outputDim)]
        for i in range(self.outputDim):
            for j in range(self.hiddenNum):
                self.y[i] += self.hidden_to_output_weight[j][i] * hidden_output[j]

        # Normalise so the outputs sum to 1.  Guard total == 0, which can
        # occur once weights turn negative during training (0/total == 0
        # anyway, so dividing every component is numerically identical to
        # the original per-component non-zero check).
        total = 0
        for i in range(self.outputDim):
            total += self.y[i]

        if total != 0:
            for i in range(self.outputDim):
                self.y[i] = self.y[i] / total
        self.total = total
        return self.y

    # Compute error for the pattern
    def get_error_for_pattern(self):
        """Squared error between the stored target and the latest output."""
        error = 0
        for i in range(len(self.y)):
            error += (self.actual_target_values[i] - self.y[i]) ** 2

        return error

    # Weight update by gradient descent algorithm
    def gradient_descent(self, x):
        """Single-pattern update of weights, bias, centers and spread radii.

        :param x: the input pattern just forwarded through the network.
        :return: the cumulative (never reset) half-squared-error accumulator.
        """
        # Error at the output layer for the current pattern.
        for i in range(self.outputDim):
            self.error_of_output_layer[i] = float(self.actual_target_values[i] - self.y[i])
            e = float(self.actual_target_values[i] - self.y[i]) ** 2 * 0.5
            self.mean_error += e

        # Adjust hidden-to-output weights.
        for o in range(self.outputDim):
            for h in range(self.hiddenNum):
                delta_weight = self.learning_rate * self.error_of_output_layer[o] * self.hidden_output[h]
                self.hidden_to_output_weight[h][o] += delta_weight

        # Adjust bias (see NOTE in pass_to_output_node: it is never applied).
        for o in range(self.outputDim):
            delta_bias = self.learning_rate * self.error_of_output_layer[o]
            self.output_bias[o] += delta_bias

        # The back-propagated error term depends only on the hidden unit j,
        # not on the input dimension i, so compute it once per unit instead
        # of once per (i, j) pair -- a pure loop-invariant hoist, values are
        # unchanged (weights were already updated above, as in the original).
        backprop_error = [
            sum(
                self.hidden_to_output_weight[j][p] * (self.actual_target_values[p] - self.y[p])
                for p in range(self.outputDim)
            )
            for j in range(self.hiddenNum)
        ]

        # Adjust centers (the input-to-hidden "weights").
        for i in range(self.inputDim):
            for j in range(self.hiddenNum):
                second_part = float((x[i] - self.centroid[j][i]) / math.pow(self.sigma[j], 2))
                delta_weight = float(self.learning_rate * self.hidden_output[j] * second_part * backprop_error[j])
                self.centroid[j][i] += delta_weight

        # Adjust spread radii.  The loop order (i outer, j inner) is kept
        # because sigma[j] is read and incremented once per input dimension.
        for i in range(self.inputDim):
            for j in range(self.hiddenNum):
                second_part = float(
                    (math.pow((x[i] - self.centroid[j][i]), 2)) / math.pow(self.sigma[j], 3))
                delta_weight = float(0.1 * self.learning_rate * self.hidden_output[j] * second_part * backprop_error[j])
                self.sigma[j] += delta_weight
        return self.mean_error

    def set_cor_sta(self, cor_sta):
        """Set the 0/1 decision threshold used by transform() and get_accuracy()."""
        self.cor_sta = cor_sta

    # train n iteration
    def fit(self, n):
        """Train for n epochs, printing the error of each; return the last MSE."""
        error = None
        for i in range(n):
            error = self.pass_one_epoch()
            print("Iteration: ", i, "  Error: ", error)
        return error

    def transform(self, X, is_sigmoid=True):
        """
        :param X: matrix of dimensions n x inDim
        :param is_sigmoid: if True, threshold each output at cor_sta to 0/1
        :return: predictions.  NOTE(review): raw weighted sums -- unlike the
                 forward pass they are neither normalised nor biased; confirm
                 that this asymmetry is intended.
        """
        Z = np.empty(shape=[X.shape[0], self.outputDim])
        for i in range(len(X)):
            g = self.pass_to_hidden_node(X[i])
            z = np.dot(g, self.hidden_to_output_weight)
            if is_sigmoid:
                Z[i] = [1 if ans >= self.cor_sta else 0 for ans in z]
            else:
                Z[i] = z
        return Z

    def get_accuracy(self):
        """Percentage of training patterns whose winning output unit matches
        the target's winning unit (after thresholding at cor_sta)."""
        correct = 0
        for i in range(len(self.X)):
            z = self.pass_input_to_network(self.X[i])  # actual network output y'
            y = self.Y[i]  # ground-truth target y
            z_sigmoid = [1 if ans >= self.cor_sta else 0 for ans in z]
            n_neuron = get_fired_neuron(z_sigmoid)
            a_neuron = get_fired_neuron(y)

            if n_neuron == a_neuron:
                correct += 1
        return float(correct / len(self.X)) * 100


def testFun():
    """Smoke test: train a small RBF network on a 3-input / 2-output toy set
    and print the thresholded predictions, accuracy and final MSE."""
    X = np.array([
        [0, 0, 0],
        [0, 1, 2],
        [1, 0, 1],
        [1, 1, 1],
    ])
    Y = np.array([
        [1, 0],
        [0, 1],
        [0, 1],
        [1, 0],
    ])

    # 6 hidden RBF units; input/output sizes derived from the data itself.
    rbf = RBFNetwork(len(X[0]), 6, len(Y[0]), X, Y)
    mse = rbf.fit(1500)

    Z = rbf.transform(X)
    print(Z)
    acc = rbf.get_accuracy()
    print("Total accuracy is ", acc)
    print("Last MSE ", mse)