import os  # create the model save directory before dumping

import joblib  # persist the fitted PCA model
import matplotlib.pyplot as plt  # data visualisation
import numpy as np
import pandas as pd
import seaborn as sns  # heatmap plotting
from sklearn import preprocessing
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA  # PCA algorithm
from sklearn.preprocessing import StandardScaler

# Custom PCA analysis helper class
class MyPCA():
    """PCA analysis helper.

    Fits an sklearn PCA model (on user-supplied data or, by default, the
    iris dataset), persists it with joblib, and prints eigenvalues,
    eigenvectors, variance-contribution ratios, SPSS-style component score
    coefficients, and min-max-normalized feature weights.  Can also plot
    the covariance matrix of the standardized data as a heatmap.
    """

    def __init__(self, data=None, target=None):
        # Optional feature matrix and labels; when left as None,
        # parse() falls back to the built-in iris dataset.
        self.data = data
        self.target = target

    def CovarianceMatrixPlot(self, data):
        """Standardize ``data`` and display its covariance matrix heatmap.

        Parameters
        ----------
        data : array-like of shape (n_samples, n_features)
            Raw feature matrix; the tick labels assume 4 features.
        """
        X_std = StandardScaler().fit_transform(data)
        X_mean = np.mean(X_std, axis=0)
        n = X_std.shape[0]
        # Biased (divide-by-n) covariance of the standardized features;
        # symmetric (n_features, n_features) matrix.
        X_cov = (X_std - X_mean).T @ (X_std - X_mean) / n

        labs = ['1', '2', '3', '4']  # feature indices used as tick labels
        # Larger values render darker.
        sns.heatmap(X_cov, annot=True, fmt='g', cmap="YlGnBu",
                    xticklabels=labs, yticklabels=labs)
        plt.show()

    def parse(self):
        """Fit PCA, save the model, and print its diagnostics.

        Uses ``self.data`` / ``self.target`` when provided, otherwise loads
        the iris dataset.  The fitted model is written to ``./save/pca.pkl``.
        """
        if self.data is None:
            original_data = load_iris()
            y = original_data.target
            x = original_data.data
        else:
            x = self.data
            y = self.target

        pca = PCA()                        # keep all components
        reduced_x = pca.fit_transform(x)   # project the samples

        # Persist the fitted model; create the directory first so
        # joblib.dump does not fail when ./save is missing.
        os.makedirs('./save', exist_ok=True)
        joblib.dump(pca, r'./save/pca.pkl')

        # Model diagnostics.
        explained_var = pca.explained_variance_            # eigenvalues
        components = pca.components_                       # eigenvectors (one per row)
        explained_var_rat = pca.explained_variance_ratio_  # variance contribution ratios
        # SPSS-style component score coefficient matrix.
        k1_spss = components / np.sqrt(explained_var.reshape(-1, 1))

        # Weight of each original feature: the variance-ratio-weighted sum
        # of its coefficients across ALL principal components.
        # NOTE: the previous loop overwrote the accumulator on every inner
        # iteration, keeping only the last component's term — fixed by
        # summing over the component axis in one vectorized expression.
        total_ratio = np.sum(explained_var_rat)
        Weights = []
        for j in range(k1_spss.shape[1]):
            coefficient = np.sum(100 * explained_var_rat * k1_spss[:, j]) / total_ratio
            # np.float was removed in NumPy 1.20+; use the builtin.
            Weights.append(float(coefficient))

        # Min-max normalize the weights to [0, 1].
        Weights = pd.DataFrame(Weights)
        Weights2 = preprocessing.MinMaxScaler().fit_transform(Weights)

        # Report the results.
        print("特征根为:", explained_var)
        print("特征向量为:", components)
        print("方差贡献率为:", explained_var_rat)
        print("成分得分系数为:", k1_spss)
        print("各个特征值的权重:", Weights)
        print("归一化后特征值的权重:", Weights2)

if __name__ == '__main__':
    # Demo: heatmap of the covariance matrix of the raw iris features.
    iris = load_iris()
    analyzer = MyPCA()
    analyzer.CovarianceMatrixPlot(iris.data)
    # analyzer.parse()

