import numpy as np


class PCA:
    """Principal Component Analysis via gradient ascent.

    Finds the directions of maximal projected variance one at a time:
    each component is obtained by gradient ascent on the variance of the
    (demeaned) data projected onto a unit vector, then that component is
    subtracted from the data before searching for the next one.
    """

    def __init__(self, n_components):
        # Number of principal components to extract.
        self.n_components = n_components
        # (n_components, n_features) matrix of unit principal directions,
        # filled in by fit().
        self.components_ = None

    def fit(self, X, eta=0.01, epsilon=1e-8, n_iters=1e4):
        """Compute the first ``n_components`` principal directions of ``X``.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Training data.
        eta : float
            Gradient-ascent learning rate.
        epsilon : float
            Convergence threshold on the change of the objective.
        n_iters : int or float
            Maximum iterations per component.

        Returns
        -------
        self
        """

        def demean(X):
            """Center each feature (column) of X at zero mean."""
            return X - np.mean(X, axis=0)

        def direction(w):
            """Normalize w to a unit vector."""
            return w / np.linalg.norm(w)

        def J(X, w):
            """Variance of X projected onto w (the objective to maximize)."""
            return np.sum((X.dot(w) ** 2)) / len(X)

        def dJ(X, w):
            """Gradient of J with respect to w: 2 * X^T X w / n."""
            return X.T.dot(X.dot(w)) * 2. / len(X)

        def subtract_component(X, w):
            """Remove from X its component along the unit vector w."""
            return X - (X.dot(w).reshape(-1, 1) * w)

        def first_component(X, initial_w, eta, epsilon, n_iters):
            """Find the single top principal direction of X by gradient ascent."""
            w = direction(initial_w)
            i_iter = 0
            while i_iter < n_iters:
                last_w = w
                # Fix: re-normalize after each step — w must stay a unit
                # vector or both J and subtract_component are wrong, and w
                # grows without bound.
                w = direction(w + eta * dJ(X, w))
                if abs(J(X, w) - J(X, last_w)) < epsilon:
                    break
                i_iter += 1
            return w

        # Fix: forward the caller's eta/epsilon/n_iters (they were silently
        # ignored before, inner defaults being used instead).
        X_pca = demean(X)
        W = np.empty(shape=(self.n_components, X.shape[1]))
        for i in range(self.n_components):
            initial_w = np.random.uniform(size=X_pca.shape[1])
            w = first_component(X_pca, initial_w, eta, epsilon, n_iters)
            W[i, :] = w
            # Deflate: remove the found component before the next search.
            X_pca = subtract_component(X_pca, w)

        self.components_ = W
        return self

    def transform(self, X):
        """Project X onto the first n_components principal directions.

        NOTE(review): X is not demeaned here, mirroring the original
        behavior — callers projecting new data may want to center it first.
        """
        return X.dot(self.components_.T)

    def inverse_transform(self, X_pca):
        """Map reduced data back to the original feature space (lossy)."""
        return X_pca.dot(self.components_)

    def __repr__(self):
        return "PCA(%d)" % self.n_components
