import numpy as np

class RSCN:
    """Recurrent Stochastic Configuration Network (RSCN).

    The network grows incrementally: candidate random input/recurrent weights
    for a new hidden (reservoir) node are accepted only when they satisfy the
    stochastic-configuration inequality against the current residual, and the
    read-out weights are then re-solved by least squares.
    """

    def __init__(self, L_max=None, T_max=None, tol=None, Lambdas=None, r=None,
                 nB=None, eta=None, alpha=None, n_samples=None, dimension=None,
                 disgard=None):
        # Model identification.
        self.Name = 'Recurrent Stochastic Configuration Networks'
        self.version = '1.0 beta'
        self.L = 1        # current number of hidden nodes, starts at 1
        self.Win = None   # input weight matrix, shape (dimension, L)
        self.Wr = None    # recurrent state weight matrix, shape (L, L)
        self.b = None     # hidden bias row vector, shape (1, L)
        self.Beta = None  # output weights used by the feed-forward helpers
        self.U = None     # hidden-state column of the most recently added node
        self.n_samples = n_samples if n_samples is not None else 0  # number of training samples
        self.eta = eta if eta is not None else 0.00001  # ridge factor; NOTE(review): currently unused by UpgradeRSCN
        self.alpha = alpha if alpha is not None else 0.99  # spectral-radius scaling factor
        self.disgard = disgard if disgard is not None else 50  # washout: initial states dropped at prediction time
        self.W_Out = None  # read-out weight matrix
        self.dimension = dimension if dimension is not None else 1  # input feature dimension

        # Random initialization of the first hidden node; Wr is rescaled so
        # that its spectral radius equals alpha (echo-state condition).
        self.Win = np.random.rand(self.dimension, self.L) * 2 - 1
        self.Wr = np.random.rand(self.L, self.L) * 2 - 1
        self.Wr = self.Wr / max(abs(np.linalg.eigvals(self.Wr))) * self.alpha
        self.b = np.random.rand(1, self.L) * 2 - 1
        self.U = np.zeros((self.dimension, self.L))
        # FIX: the original default repeated 0.99999 twice; use six distinct,
        # strictly increasing contraction factors.
        self.r = r if r is not None else [0.9, 0.99, 0.999, 0.9999, 0.99999, 0.999999]
        self.tol = tol if tol is not None else 1e-2  # target training error
        self.Lambdas = Lambdas if Lambdas is not None else [0.5, 1.5, 10, 30, 50, 100]  # candidate weight scales
        self.L_max = L_max if L_max is not None else 100  # maximum number of hidden nodes
        self.T_max = T_max if T_max is not None else 100  # max random configurations per (Lambda, r) pair

        # Other parameters.
        self.nB = nB if nB is not None else 1  # nodes added per growth step, default 1
        self.verbose = 5  # progress-print frequency, in nodes (default 5)
        self.COST = 0     # final training error

        # Print less often for very large networks.
        if self.L_max > 5000:
            self.verbose = 500

    def tanh(self, x):
        """Hyperbolic-tangent activation used throughout the reservoir."""
        return np.tanh(x)

    def RMSE(self, E):
        """Root-mean-square of the error array E."""
        return np.sqrt(np.mean(E ** 2))

    def NRMSE(self, y_true, y_pred):
        """Normalized RMSE: sqrt(MSE / Var(y_true))."""
        mse = np.mean((y_true - y_pred) ** 2)
        variance = np.var(y_true)
        return np.sqrt(mse / variance)

    def OneHotMatrix(self, O):
        """One-hot encode each row of O at the position of its row-wise max."""
        O_labels = np.zeros_like(O)
        indices = np.argmax(O, axis=1)
        O_labels[np.arange(len(O)), indices] = 1
        return O_labels

    def InequalityEq(self, eq, gk, r_L):
        """Stochastic-configuration criterion ksi for one output dimension.

        A positive value means candidate state vector gk reduces the residual
        eq fast enough under contraction factor r_L.
        """
        numerator = (np.dot(eq.T, gk)) ** 2
        denominator = np.dot(gk.T, gk)
        ksi = numerator / denominator - (1 - r_L) * np.dot(eq.T, eq)
        # FIX: return a scalar even when gk is a column vector; the original
        # returned a (1,1) array that callers element-assigned into a float
        # array (NumPy ndim>0 -> scalar deprecation).
        return np.asarray(ksi).item()

    def SC_Search(self, X, E0):
        """Search for nB candidate nodes {WinB, WrB, bB} against residual E0.

        Returns (WinB, WrB, U, bB, Flag); Flag == 1 means no acceptable
        candidate was found and network growth should stop.
        """
        Flag = 0  # 0: continue; 1: stop

        self.n_samples, d = X.shape   # sample count and input dimension
        _, m = E0.shape               # output (residual) dimension
        WinB = []
        WrB = []
        U = []
        bB = []
        C = []   # ksi scores of accepted candidates
        nC = 0   # number of accepted candidates
        for Lambda in self.Lambdas:
            # Draw T_max random candidate weights and biases at this scale.
            WinT = Lambda * (2 * np.random.rand(d, self.T_max) - 1)
            WrT = Lambda * (2 * np.random.rand(self.L + 1, self.T_max) - 1)
            bT = Lambda * (2 * np.random.rand(1, self.T_max) - 1)

            for r_L in self.r:
                # Evaluate the ksi value of every candidate node.
                for t in range(self.T_max):
                    U_Tem = np.zeros((self.n_samples + 1, self.L + 1))
                    # Tentatively extend the network with candidate t.
                    WinT_Tem = np.hstack((self.Win, WinT[:, t:t + 1]))
                    WrT_Tem = np.concatenate((self.Wr, np.zeros((1, self.L))), axis=0)
                    WrT_Tem = np.hstack((WrT_Tem, WrT[:, t:t + 1]))
                    # Rescale the extended Wr to spectral radius alpha.
                    WrT_Tem = WrT_Tem / max(abs(np.linalg.eigvals(WrT_Tem))) * self.alpha
                    # Run the extended reservoir over the training sequence.
                    for X_index in range(self.n_samples):
                        U_Tem[X_index + 1, :] = self.tanh(
                            np.dot(X[X_index, :], WinT_Tem) + bT[:, t]
                            + np.dot(U_Tem[X_index, :], WrT_Tem))
                    # Keep only the new node's state column; drop the zero
                    # initial state.
                    U_Tem = U_Tem[1:self.n_samples + 1, self.L:self.L + 1]

                    ksi_m = np.zeros(m)
                    for i_m in range(m):
                        eq = E0[:, i_m]
                        gk = U_Tem
                        ksi_m[i_m] = self.InequalityEq(eq, gk, r_L)
                    Ksi_t = sum(ksi_m)
                    # Accept only if every output dimension passes the test.
                    if min(ksi_m) > 0:
                        C.append(Ksi_t)
                        WinB.append(WinT[:, t])
                        WrB.append(WrT[:, t])
                        U.append(U_Tem)
                        bB.append(bT[:, t])
                        nC += 1
                if nC >= self.nB:
                    break  # leave the r_L loop
            if nC >= self.nB:
                break  # leave the Lambda loop
        if nC >= self.nB:
            # Sort by ksi in descending order and keep the best nB nodes.
            I = np.argsort(C)[::-1]
            I_nb = I[:self.nB]
            WinB = [WinB[i] for i in I_nb]
            WrB = [WrB[i] for i in I_nb]
            U = [U[i] for i in I_nb]
            U = np.array(U)[0]
            self.U = U
            bB = [bB[i] for i in I_nb]
        else:
            # No acceptable candidate found: abandon the search.
            print('搜索结束...')
            Flag = 1
            WinB = []
            WrB = []
            U = []
            bB = []
        return WinB, WrB, U, bB, Flag

    def AddNodes(self, WinB, WrB, bB):
        """Append the selected candidate node(s) to the network weights."""
        Win_L_array = np.array(WinB).T           # shape (d, nB)
        Wr_L_array = np.array(WrB).T             # shape (L + 1, nB)
        B_L_array = np.array(bB).reshape(1, -1)  # shape (1, nB)
        if self.Win is None:
            self.Win = Win_L_array
        else:
            self.Win = np.hstack((self.Win, Win_L_array))
        if self.Wr is None:
            self.Wr = Wr_L_array
        else:
            # Grow Wr from (L, L) to (L+1, L+1): pad a zero row, then append
            # the new recurrent column.
            # NOTE(review): unlike SC_Search, the grown Wr is NOT rescaled to
            # spectral radius alpha here — confirm this is intended.
            WrT_Tem = np.concatenate((self.Wr, np.zeros((1, self.L))), axis=0)
            self.Wr = np.hstack((WrT_Tem, Wr_L_array))
        if self.b is None:
            self.b = B_L_array
        else:
            self.b = np.concatenate((self.b, B_L_array), axis=1)
        self.L = self.b.shape[1]  # updated hidden-node count

    def UpgradeRSCN(self, X, U, T):
        """Recompute hidden states with the enlarged network and re-solve the
        read-out weights by least squares.

        Returns (O, E, Error): outputs, residual matrix, and NRMSE.
        NOTE(review): the `U` argument is accepted for API compatibility but
        the states are recomputed from scratch here.
        """
        U_Tem = np.zeros((self.n_samples + 1, self.L))
        for X_index in range(self.n_samples):
            U_Tem[X_index + 1, :] = self.tanh(
                np.dot(X[X_index, :], self.Win) + self.b + np.dot(U_Tem[X_index, :], self.Wr))
        U = U_Tem[1:self.n_samples + 1, :]  # drop the zero initial state
        # Plain least-squares read-out (the eta-regularized variant is not
        # used, matching the original behavior).
        W_OUT = np.linalg.pinv(U.T @ U) @ U.T @ T
        self.W_Out = W_OUT
        O = U @ W_OUT
        E = T - O
        Error = self.NRMSE(T, O)
        self.COST = Error
        return O, E, Error

    def ComputeBeta(self, H, T):
        """Least-squares output weights for a feed-forward hidden matrix H."""
        self.Beta = np.linalg.pinv(H).dot(T)

    def Regression(self, X, T):
        """Grow the network for regression until tol or L_max is reached.

        Returns a dict with the per-node 'Error' history.
        """
        per = {'Error': []}
        E = T
        Error = self.RMSE(E)
        print(self.Name)
        while (self.L < self.L_max) and (Error > self.tol):
            if self.L % self.verbose == 0:
                print(f'L:{self.L}\t\tNRMSE:{Error:.6f}')
            WinB, WrB, U, bB, Flag = self.SC_Search(X, E)
            if Flag == 1:
                break  # search failed; stop growing
            self.AddNodes(WinB, WrB, bB)
            O, E, Error = self.UpgradeRSCN(X, U, T)
            per['Error'].extend([Error] * self.nB)
        print(f'#L:{self.L}\t\tNRMSE:{Error:.6f}')
        print('*' * 30)
        return per

    def Classification(self, X, T):
        """Grow the network for classification (one-hot targets T).

        Returns a dict with per-node 'Error' and accuracy 'Rate' histories.
        """
        per = {'Error': [], 'Rate': []}
        E = T
        Error = self.RMSE(E)
        Rate = 0
        print(self.Name)
        while (self.L < self.L_max) and (Error > self.tol):
            if self.L % self.verbose == 0:
                print(f'L:{self.L}\t\t NRMSE:{Error:.6f}; \t\tRate:{Rate:.2f}')
            # FIX: the original unpacked 3 values from SC_Search (which
            # returns 5), called AddNodes with 2 args (takes 3), and invoked
            # the non-existent UpgradeSCN; route the growth step through the
            # same API as Regression.
            WinB, WrB, U, bB, Flag = self.SC_Search(X, E)
            if Flag == 1:
                break
            self.AddNodes(WinB, WrB, bB)
            O, E, Error = self.UpgradeRSCN(X, U, T)
            # FIX: derive labels from the fitted outputs O instead of
            # GetLabel(X), whose GetOutput call needed test data unavailable
            # during training.
            O_labels = np.argmax(O, axis=1)
            T_labels = np.argmax(T, axis=1)
            Rate = self.GetAccuracy(O_labels, T_labels)
            per['Error'].extend([Error] * self.nB)
            per['Rate'].extend([Rate] * self.nB)
        print(f'#L:{self.L}\t\t RMSE:{Error:.6f}; \t\tRate:{Rate:.2f}')
        print('*' * 30)
        return per

    def GetH(self, X):
        """Feed-forward hidden output matrix (no recurrence; SCN-style helper)."""
        H = self.ActivationFun(X)
        return H

    def ActivationFun(self, X):
        """Feed-forward activation of the hidden layer.

        FIX: the original called the undefined self.logsig and contained a
        stray '+ +'; use the network's tanh activation instead.
        """
        H = self.tanh(np.dot(X, self.Win) + self.b)
        return H

    def GetOutput(self, X, test_x, pre_length):
        """Free-running prediction of `pre_length` steps.

        The reservoir is first warmed up on the training input X to obtain
        the final hidden state; prediction then iterates on its own outputs
        seeded from test_x (whose copy is returned with rows 1.. replaced by
        predictions).
        """
        # Warm-up pass over the training sequence; drop the washout states.
        U_Tem = np.zeros((self.n_samples + 1, self.L))
        for X_index in range(self.n_samples):
            U_Tem[X_index + 1, :] = self.tanh(
                np.dot(X[X_index, :], self.Win) + self.b + np.dot(U_Tem[X_index, :], self.Wr))
        U_Tem = U_Tem[1 + self.disgard:self.n_samples + 1, :]
        W_OUT = self.W_Out
        U_Tem = U_Tem[-1, :]  # last hidden state seeds the free run

        # FIX: removed the dead out_put pre-allocation and first-row write
        # that the original immediately overwrote with test_x.copy().
        U_Tem_Pre = np.zeros((pre_length + 1, self.L))
        U_Tem_Pre[0, :] = U_Tem
        out_put = test_x.copy()
        for pre_index in range(pre_length - 1):
            U_Tem_Pre[pre_index + 1, :] = self.tanh(
                np.dot(out_put[pre_index, :], self.Win) + self.b + np.dot(U_Tem_Pre[pre_index, :], self.Wr))
            out_put[pre_index + 1, :] = U_Tem_Pre[pre_index + 1, :] @ W_OUT
        return out_put

    def GetLabel(self, X):
        """Predicted class labels for the input sequence X.

        FIX: the original called GetOutput(X) with the wrong arity; compute
        the teacher-forced outputs directly from the reservoir states and the
        trained read-out weights instead.
        """
        n = X.shape[0]
        U_Tem = np.zeros((n + 1, self.L))
        for X_index in range(n):
            U_Tem[X_index + 1, :] = self.tanh(
                np.dot(X[X_index, :], self.Win) + self.b + np.dot(U_Tem[X_index, :], self.Wr))
        O = U_Tem[1:n + 1, :] @ self.W_Out
        O_labels = np.argmax(O, axis=1)
        return O_labels

    def GetAccuracy(self, O_labels, T_labels):
        """Fraction of predicted labels matching the true labels."""
        correct = np.sum(O_labels == T_labels)
        total = len(T_labels)
        Rate = correct / total
        return Rate

    def GetResult(self, X, T):
        """Error, outputs, hidden matrix and residual of the feed-forward path.

        NOTE(review): relies on self.Beta, which is only set by ComputeBeta
        (never called by the recurrent training loop) — SCN-style leftover.
        """
        H = self.GetH(X)
        O = np.dot(H, self.Beta)
        E = T - O
        Error = self.RMSE(E)
        return Error, O, H, E