def demean(X):
    """Center X column-wise: subtract each feature's mean (axis 0)."""
    column_means = np.mean(X, axis=0)
    return X - column_means


def J(X, w):
    """Objective for PCA ascent: mean squared projection of X's rows onto w."""
    projections = X.dot(w)
    return np.sum(projections ** 2) / len(X)


def dJ(X, w):
    """Gradient of J with respect to w: (2/n) * X^T (X w)."""
    Xw = X.dot(w)
    return X.T.dot(Xw) * 2 / len(X)


def substract(X, w):
    """Remove from each row of X its component along the (unit) vector w.

    NOTE(review): name keeps the original spelling because callers in this
    file use it.
    """
    # Scalar projection of every row onto w, as a column so it broadcasts.
    scalar_proj = X.dot(w).reshape(-1, 1)
    return X - scalar_proj * w


def direction(w):
    """Scale w to unit Euclidean length."""
    length = np.linalg.norm(w)
    return w / length


def gradient_asccent(X, initial_w, n_iters=1e4, eta=0.01, epsilon=1e-8):
    """Find the first principal component of X by gradient ascent.

    Maximizes J(X, w) over unit vectors: after every gradient step the
    vector is projected back onto the unit sphere. Iterates until the
    objective improves by less than `epsilon` or `n_iters` steps elapse.

    X is assumed to be demeaned by the caller. Returns a unit vector.
    """
    # Work with a unit vector throughout.
    w = direction(initial_w)
    for _ in range(int(n_iters)):
        previous_w = w
        # Ascend the gradient, then renormalize onto the unit sphere.
        w = direction(w + eta * dJ(X, w))
        # Converged: the objective barely changed between iterates.
        if np.abs(J(X, w) - J(X, previous_w)) < epsilon:
            break
    return w


def gradient_asccent_n(n, X, initial_w, n_iters=1e4, eta=0.01, epsilon=1e-8):
    """Extract the first n principal components of X by repeated ascent.

    Demeans X, then alternates: find a component with gradient ascent,
    subtract that component from the data, repeat. `initial_w` seeds the
    search for the first component; subsequent components start from a
    fresh random vector.

    Returns a list of n unit vectors (numpy arrays).

    Fixes vs. original: n_iters/eta/epsilon were accepted but never
    forwarded to gradient_asccent, initial_w was accepted but never used,
    and X.copy() was dead code (immediately overwritten; demean already
    returns a new array).
    """
    X_pca = demean(X)
    components = []
    for i in range(n):
        # First component starts from the caller-supplied vector;
        # later components start from a random one, as before.
        w_start = initial_w if i == 0 else np.random.random(X_pca.shape[1])
        w = gradient_asccent(X_pca, w_start,
                             n_iters=n_iters, eta=eta, epsilon=epsilon)
        components.append(w)
        # Remove the found component so the next search is confined to
        # the orthogonal complement.
        X_pca = substract(X_pca, w)
    return components
