import numpy as np

class SCN:
    def __init__(self, L_max=None, T_max=None, tol=None, Lambdas=None, r=None, nB=None):
        """Stochastic Configuration Network (SCN) with incremental node addition.

        Parameters
        ----------
        L_max : int, optional
            Maximum number of hidden nodes (default 100).
        T_max : int, optional
            Number of random candidate nodes drawn per (Lambda, r) round
            (default 100).
        tol : float, optional
            Training RMSE tolerance that stops node addition (default 1e-4).
        Lambdas : list of float, optional
            Scales of the uniform ranges [-Lambda, Lambda] the random input
            weights and biases are drawn from.
        r : list of float, optional
            Increasing contraction sequence used in the stochastic
            configuration inequality; values closer to 1 relax the condition.
        nB : int, optional
            Number of nodes added per training iteration (default 1).
        """
        self.Name = 'Stochastic Configuration Networks'
        self.version = '1.0 beta'
        self.L = 1          # current hidden-node count (starts at 1)
        self.W = None       # input-weight matrix, shape (d, L)
        self.b = None       # hidden-layer bias row vector, shape (1, L)
        self.Beta = None    # output-weight matrix, shape (L, m)
        # Configuration parameters.
        # BUGFIX: the original default r list ended with 0.99999 twice; the
        # final entry is corrected to 0.999999 so the sequence keeps relaxing
        # the configuration inequality instead of repeating a level.
        self.r = r if r is not None else [0.9, 0.99, 0.999, 0.9999, 0.99999, 0.999999]
        self.tol = tol if tol is not None else 1e-4
        self.Lambdas = Lambdas if Lambdas is not None else [0.5, 1, 3, 5, 7, 9, 15, 25, 50, 100, 150, 200]
        self.L_max = L_max if L_max is not None else 100
        self.T_max = T_max if T_max is not None else 100
        # Other parameters
        self.nB = nB if nB is not None else 1   # nodes added per loop
        # Progress-print frequency, in nodes. NOTE(review): the original
        # comment claimed a default of 50, but the value has always been 5;
        # the value is kept unchanged.
        self.verbose = 5
        self.COST = 0   # final training error (RMSE)

        # Print less often when the network can grow very large.
        if self.L_max > 5000:
            self.verbose = 500

    def logsig(self, x):
        # S型激活函数
        return 1 / (1 + np.exp(-x))

    def RMSE(self, E):
        # 计算均方根误差
        return np.sqrt(np.mean(E ** 2))

    def OneHotMatrix(self, O):
        # 将输出转换为独热编码
        O_labels = np.zeros_like(O)
        indices = np.argmax(O, axis=1)
        O_labels[np.arange(len(O)), indices] = 1
        return O_labels

    def InequalityEq(self, eq, gk, r_L):
        # 不等式方程，返回 ksi 值
        numerator = (np.dot(eq.T, gk)) ** 2
        denominator = np.dot(gk.T, gk)
        ksi = numerator / denominator - (1 - r_L) * np.dot(eq.T, eq)
        return ksi

    def SC_Search(self, X, E0):
        # 搜索 nB 个节点的 {WB, bB}
        Flag = 0  # 0：继续；1：停止
        WB = []
        bB = []
        n_samples, d = X.shape  # 输入样本数和特征维度
        _, m = E0.shape  # 输出样本数和维度
        C = []  # 存储 ksi 值的容器
        nC = 0
        for Lambda in self.Lambdas:
            # 生成用于选择的 T_max 个随机权重和偏置
            WT = Lambda * (2 * np.random.rand(d, self.T_max) - 1)
            bT = Lambda * (2 * np.random.rand(1, self.T_max) - 1)
            HT = self.logsig(np.dot(X, WT) + bT)
            for r_L in self.r:
                # 计算每个候选节点的 ksi 值
                for t in range(self.T_max):
                    H_t = HT[:, t]
                    ksi_m = np.zeros(m)
                    for i_m in range(m):
                        eq = E0[:, i_m]
                        gk = H_t
                        ksi_m[i_m] = self.InequalityEq(eq, gk, r_L)
                    Ksi_t = sum(ksi_m)
                    if min(ksi_m) > 0:
                        C.append(Ksi_t)
                        WB.append(WT[:, t])
                        bB.append(bT[0, t])
                        nC += 1
                if nC >= self.nB:
                    break  # 跳出 r_L 循环
            if nC >= self.nB:
                break  # 跳出 Lambda 循环
        if nC >= self.nB:
            # 按降序排序并选择前 nB 个节点
            I = np.argsort(C)[::-1]
            I_nb = I[:self.nB]
            WB = [WB[i] for i in I_nb]
            bB = [bB[i] for i in I_nb]
        else:
            # 放弃权重和偏置
            print('搜索结束...')
            Flag = 1
            WB = []
            bB = []
        return WB, bB, Flag

    def AddNodes(self, w_L, b_L):
        # 将新节点添加到模型中
        w_L_array = np.array(w_L).T  # 形状为 (d, nB)
        b_L_array = np.array(b_L).reshape(1, -1)  # 形状为 (1, nB)
        if self.W is None:
            self.W = w_L_array
        else:
            self.W = np.concatenate((self.W, w_L_array), axis=1)
        if self.b is None:
            self.b = b_L_array
        else:
            self.b = np.concatenate((self.b, b_L_array), axis=1)
        self.L = self.b.shape[1]

    def UpgradeSCN(self, X, T):
        # 计算 Beta、输出、误差向量和成本
        H = self.GetH(X)
        self.ComputeBeta(H, T)
        O = np.dot(H, self.Beta)
        E = T - O
        Error = self.RMSE(E)
        self.COST = Error
        return O, E, Error

    def ComputeBeta(self, H, T):
        # 计算输出权重 Beta
        self.Beta = np.linalg.pinv(H).dot(T)

    def Regression(self, X, T):
        # 回归训练方法
        per = {'Error': []}
        E = T
        Error = self.RMSE(E)
        print(self.Name)
        while (self.L < self.L_max) and (Error > self.tol):
            if self.L % self.verbose == 0:
                print(f'L:{self.L}\t\tRMSE:{Error:.6f}')
            w_L, b_L, Flag = self.SC_Search(X, E)
            if Flag == 1:
                break
            self.AddNodes(w_L, b_L)
            O, E, Error = self.UpgradeSCN(X, T)
            per['Error'].extend([Error] * self.nB)
        print(f'#L:{self.L}\t\tRMSE:{Error:.6f}')
        print('*' * 30)
        return per

    def Classification(self, X, T):
        # 分类训练方法
        per = {'Error': [], 'Rate': []}
        E = T
        Error = self.RMSE(E)
        Rate = 0
        print(self.Name)
        while (self.L < self.L_max) and (Error > self.tol):
            if self.L % self.verbose == 0:
                print(f'L:{self.L}\t\t RMSE:{Error:.6f}; \t\tRate:{Rate:.2f}')
            w_L, b_L, Flag = self.SC_Search(X, E)
            if Flag == 1:
                break
            self.AddNodes(w_L, b_L)
            O, E, Error = self.UpgradeSCN(X, T)
            O_labels = self.GetLabel(X)
            T_labels = np.argmax(T, axis=1)
            Rate = self.GetAccuracy(O_labels, T_labels)
            per['Error'].extend([Error] * self.nB)
            per['Rate'].extend([Rate] * self.nB)
        print(f'#L:{self.L}\t\t RMSE:{Error:.6f}; \t\tRate:{Rate:.2f}')
        print('*' * 30)
        return per

    def GetH(self, X):
        # 计算隐藏层的输出矩阵
        H = self.ActivationFun(X)
        return H

    def ActivationFun(self, X):
        # 激活函数（对数 S 型函数）
        H = self.logsig(np.dot(X, self.W) + self.b)
        return H

    def GetOutput(self, X):
        # 计算网络的输出
        H = self.GetH(X)
        O = np.dot(H, self.Beta)
        return O

    def GetLabel(self, X):
        # 获取预测的标签
        O = self.GetOutput(X)
        O_labels = np.argmax(O, axis=1)
        return O_labels

    def GetAccuracy(self, O_labels, T_labels):
        # 计算准确率
        correct = np.sum(O_labels == T_labels)
        total = len(T_labels)
        Rate = correct / total
        return Rate

    def GetResult(self, X, T):
        # 计算误差、输出、隐藏矩阵和误差向量
        H = self.GetH(X)
        O = np.dot(H, self.Beta)
        E = T - O
        Error = self.RMSE(E)
        return Error, O, H, E
