import numpy as np
from sklearn.cluster import KMeans, DBSCAN
from matplotlib import pyplot as plt
from scipy.linalg import norm, pinv

from machine_learning.cluster.fuzzy_clustering.algorithms.fcm import FCM


class MyRBF_NeuralNetwork:
    """
    Radial-basis-function (RBF) neural network.

    Hidden-layer centers can be chosen at random from the training set or
    produced by a clustering algorithm (KMeans, fuzzy C-means, or DBSCAN).
    Output weights are solved in closed form via the pseudo-inverse.
    """

    def __init__(self, inDim, numCenters, outDim, pattern=0):
        """
        :param inDim:       dimensionality of the input layer
        :param numCenters:  number of hidden-layer nodes (RBF centers)
        :param outDim:      dimensionality of the output layer
        :param pattern:     mode. 0: random centers, 1: KMeans cluster centers
        """
        self.inDim = inDim
        self.outDim = outDim
        self.numCenters = numCenters
        self.X = None
        self.Y = None
        # Cluster label of each training sample (filled by KMeans_cluster).
        self.label_pred = np.empty((0, inDim))
        # One random center per hidden node: inDim values drawn from [-1, 1).
        self.centers = [np.random.uniform(-1, 1, inDim) for _ in range(numCenters)]
        self.pattern = pattern
        self.beta = 8  # width parameter of the Gaussian basis function
        self.W = np.random.random((self.numCenters, self.outDim))
        # Latest network output, set by predict(). Instance attribute so that
        # separate networks never share state (the original class-level
        # ``Z = []`` was shared by every instance).
        self.Z = []

    def cluster_plot_2d(self):
        """Scatter-plot 2-D samples colored by cluster label; centers as black crosses."""
        plt.figure(figsize=(8, 6))
        plt.subplot(1, 1, 1)
        # Matplotlib style strings: e.g. 'or' = circle marker ('o') in red ('r').
        color = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
        for i in range(len(self.label_pred)):
            plt.plot([self.X[i, 0]], [self.X[i, 1]], color[self.label_pred[i]], markersize=5)
        for centroid in self.centers:
            plt.scatter(centroid[0], centroid[1], c='black', marker='x')
        plt.show()

    def cluster_plot_3d(self):
        """Scatter-plot 3-D samples colored by cluster label; centers as black crosses."""
        plt.figure(figsize=(8, 6))
        ax = plt.subplot(1, 1, 1, projection='3d')
        color = ['r', 'g', 'b', 'c', 'y', 'm', 'k', '#3a3a3a', '#a11a11', '#2b2b2b']
        for i in range(len(self.label_pred)):
            ax.scatter(self.X[i, 0], self.X[i, 1], self.X[i, 2], c=color[self.label_pred[i]])
        for centroid in self.centers:
            # Bug fix: the z-coordinate was missing, so centers were drawn on
            # the xy-plane instead of at their true 3-D position.
            ax.scatter(centroid[0], centroid[1], centroid[2], c='black', marker='x')
        plt.show()

    def basis_func(self, xp, x):
        """
        Gaussian radial basis function φ(||x − xp||) = exp(−β·||x − xp||²).

        :param xp: center point (length inDim)
        :param x:  sample point (length inDim)
        :return:   basis-function value in (0, 1]
        """
        assert len(x) == self.inDim
        return np.exp(-self.beta * norm(xp - x) ** 2)

    # Leading underscore: intended as private to the class.
    def _calcAct(self, X):
        """
        Hidden-layer activation matrix G: G[xi, ci] is the RBF value of sample
        xi with respect to center ci. The network output is Z = G·W.
        """
        G = np.zeros((X.shape[0], self.numCenters), float)
        for ci, c in enumerate(self.centers):  # centers == hidden-layer nodes
            for xi, x in enumerate(X):
                G[xi, ci] = self.basis_func(c, x)  # φ(||x − c||)
        return G

    def _solve_output_weights(self, X, Y):
        """Solve hidden-to-output weights in closed form: W = pinv(G)·Y."""
        G = self._calcAct(X)  # shape: (n_samples, numCenters)
        self.W = np.dot(pinv(G), Y)

    def KMeans_cluster(self, X, max_iter):
        """Cluster X with KMeans; store per-sample labels and cluster centers."""
        km = KMeans(n_clusters=self.numCenters, max_iter=max_iter)
        km_fit = km.fit(X)
        self.label_pred = km_fit.labels_  # cluster label of each sample
        self.centers = km_fit.cluster_centers_  # cluster-center coordinates
        return self.label_pred, self.centers

    def FCMeans_cluster(self, X, max_iter):
        """Cluster X with fuzzy C-means; store the cluster centers."""
        fcm = FCM(n_clusters=self.numCenters, max_iter=max_iter)
        fcm.fit(X)
        # Fuzzy clustering has no hard assignment per point, so label_pred
        # is not produced here.
        self.centers = fcm.centers
        return self.centers

    def DBSCAN_cluster(self, X, eps, min_samples):
        """Cluster X with DBSCAN; use its core samples as the RBF centers."""
        dbs = DBSCAN(eps=eps, min_samples=min_samples)
        dbs.fit(X)

        self.centers = dbs.components_
        # DBSCAN chooses the number of core samples itself, so the hidden
        # layer size must be updated to match.
        self.numCenters = dbs.components_.shape[0]
        return self.centers

    def fit_random_center(self, X, Y):
        """
        Train the network using centers drawn at random from the training set.

        :param X: samples, shape (n_samples, inDim)
        :param Y: targets, shape (n_samples, outDim)
        """
        self.X = X
        self.Y = Y
        # Random permutation of row indices; keep the first numCenters of them.
        rnd_idx = np.random.permutation(X.shape[0])[:self.numCenters]
        self.centers = [X[i, :] for i in rnd_idx]
        self._solve_output_weights(X, Y)

    def fit_KMeans_center(self, X, Y, max_iter=100):
        """Train the network using KMeans cluster centers as hidden nodes."""
        self.X = X
        self.Y = Y
        self.KMeans_cluster(X, max_iter)
        self._solve_output_weights(X, Y)

    def fit_FCMeans_center(self, X, Y, max_iter=100):
        """Train the network using fuzzy C-means centers as hidden nodes."""
        self.X = X
        self.Y = Y
        self.FCMeans_cluster(X, max_iter)
        self._solve_output_weights(X, Y)

    def fit_DBSCAN_center(self, X, Y, eps, min_samples):
        """Train the network using DBSCAN core samples as hidden nodes."""
        self.X = X
        self.Y = Y
        self.DBSCAN_cluster(X, eps, min_samples)
        self._solve_output_weights(X, Y)

    def predict(self, X):
        """
        :param X: matrix of dimensions n x inDim
        :return:  raw (real-valued) network output Z = G·W
        """
        G = self._calcAct(X)
        self.Z = np.dot(G, self.W)
        return self.Z

    def get_accuracy(self, Y, Z=None, cor_sta=0.5):
        """
        Binary classification accuracy after thresholding the network output.

        :param Y:       ground-truth labels (0/1)
        :param Z:       predictions; defaults to predicting on self.X
        :param cor_sta: decision threshold. 0.5 matches sigmoid(0) for data
                        standardized around 0.
        :return: fraction of correct predictions in [0, 1]
        """
        if Z is None:
            self.predict(self.X)
            Z = self.Z

        Z_sigmoid = [1 if ans >= cor_sta else 0 for ans in Z]
        correct = [1 if (a == b) else 0 for (a, b) in zip(Y, Z_sigmoid)]
        return sum(correct) / len(correct)


# %% == test ==
def test():
    """
    Regression demo: fit an RBF network with randomly chosen centers to
    y = sin(3·(x + 0.5)³ − 1), then plot the data, the prediction, and the
    individual basis functions.
    """
    num_points = 100
    x = np.linspace(-1, 1, num_points).reshape(num_points, 1)
    y = np.sin(3 * (x + 0.5) ** 3 - 1)

    # 1 input dimension, 10 hidden centers, 1 output dimension.
    network = MyRBF_NeuralNetwork(1, 10, 1)
    network.fit_random_center(x, y)
    z = network.predict(x)

    plt.figure(figsize=(12, 8))
    plt.plot(x, y, 'k-')               # ground truth (black)
    plt.plot(x, z, 'r-', linewidth=2)  # model prediction (red)
    # Mark the center locations on the x-axis as green squares.
    plt.plot(network.centers, np.zeros(network.numCenters), 'gs')
    for center in network.centers:
        # Trace each Gaussian basis function around its center.
        grid = np.arange(center - 0.7, center + 0.7, 0.01)
        values = [network.basis_func(np.array([g]), np.array([center])) for g in grid]
        plt.plot(grid, values, '-', color='gray', linewidth=0.2)

    plt.xlim(-1.2, 1.2)
    plt.show()
