# Principal component analysis (PCA) of face images — eigenfaces example
import argparse
from typing import Dict, Optional

import matplotlib.pyplot as plt
import numpy as np
import scipy.io

class Chp01Sec06(object):
    """Eigenfaces demo: PCA (via SVD) on a face-image dataset.

    Workflow:
      1. Load ``allFaces.mat`` — `faces` is (32256, 2410); each column is one
         168x192 image, grouped into 38 people with ~60-64 images each.
      2. Show a 6x6 montage of the first image of the first 36 people.
      3. SVD the mean-subtracted training faces -> eigenfaces in ``U``.
      4. (run_mode != 1) Reconstruct a held-out face at increasing rank r.
      5. Project persons 2 and 7 onto PCA modes 5/6 and scatter-plot them.
    """

    def __init__(self):
        self.name = ''

    @staticmethod
    def startup(params: Optional[Dict] = None) -> None:
        """Run the eigenfaces demonstration (opens several figures).

        Args:
            params: optional settings dict; recognized keys:
                - 'run_mode' (int, default 1): any value other than 1 also
                  runs the rank-r reconstruction of a face omitted from the
                  training set (previously dead code behind ``i_debug``).
                - 'data_path' (str): location of ``allFaces.mat``; defaults
                  to the original hard-coded path.
        """
        params = params or {}  # avoid the mutable-default-argument pitfall
        print(f'Eigenfaces Example v0.0.1')
        plt.rcParams['figure.figsize'] = [10, 10]
        plt.rcParams.update({'font.size': 18})
        data_path = params.get(
            'data_path', 'study/ddse/supports/DATA_PYTHON/DATA/allFaces.mat')
        mat_contents = scipy.io.loadmat(data_path)
        faces = mat_contents['faces']  # (32256, 2410); 32256 = 168*192 pixels
        m = int(mat_contents['m'][0][0])  # 168: image width
        n = int(mat_contents['n'][0][0])  # 192: image height
        # Image counts per person: 38 groups of roughly 60-64 images each.
        nfaces = np.ndarray.flatten(mat_contents['nfaces'])
        allPersons = np.zeros((n * 6, m * 6))  # canvas for the 6x6 montage
        count = 0
        # First image of each of the first 36 people.
        for j in range(6):
            for k in range(6):
                # np.sum(nfaces[:count]) is the column offset of person
                # `count`'s first image in `faces`.
                allPersons[j * n:(j + 1) * n, k * m:(k + 1) * m] = \
                    np.reshape(faces[:, np.sum(nfaces[:count])], (m, n)).T
                count += 1
        img = plt.imshow(allPersons)
        img.set_cmap('gray')
        plt.axis('off')
        plt.show()
        # Use the first 36 people as training data.
        trainingFaces = faces[:, :np.sum(nfaces[:36])]
        avgFace = np.mean(trainingFaces, axis=1)  # (n*m,) mean face
        # Eigenfaces: SVD of the mean-subtracted training data.
        # Broadcasting replaces the original np.tile(...).T construction.
        X = trainingFaces - avgFace[:, np.newaxis]
        U, S, VT = np.linalg.svd(X, full_matrices=0)
        print(f'avgFace: {avgFace.shape}; U: {U.shape};')
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(121)
        img_avg = ax1.imshow(np.reshape(avgFace, (m, n)).T)
        img_avg.set_cmap('gray')
        ax1.set_title('average face')
        plt.axis('off')
        ax2 = fig1.add_subplot(122)
        img_u1 = ax2.imshow(np.reshape(U[:, 0], (m, n)).T)
        img_u1.set_cmap('gray')
        ax2.set_title('eigen face')
        plt.axis('off')
        plt.show()
        if params.get('run_mode', 1) != 1:
            # Eigenface reconstruction of an image omitted from the
            # training set (first face of person 37).
            testFace = faces[:, np.sum(nfaces[:36])]
            plt.imshow(np.reshape(testFace, (m, n)).T)
            plt.set_cmap('gray')
            plt.title('Original Image')
            plt.axis('off')
            plt.show()
            # Mean-subtracted test image.
            testFaceMS = testFace - avgFace
            r_list = [25, 50, 100, 200, 400, 800, 1600]
            for r in r_list:  # r is the SVD truncation rank
                # U[:,:r].T @ testFaceMS are the stored (compressed)
                # coefficients; projecting back through U[:,:r] and adding
                # the mean face recovers an approximation of the image.
                reconFace = avgFace + U[:, :r] @ U[:, :r].T @ testFaceMS
                img = plt.imshow(np.reshape(reconFace, (m, n)).T)
                img.set_cmap('gray')
                plt.title('r = ' + str(r))
                plt.axis('off')
                plt.show()
        # Project persons 2 and 7 onto PCA modes 5 and 6 to visualize how
        # well two principal components separate the two people.
        P1num = 2  # person (group) number 2
        P2num = 7  # person (group) number 7
        P1 = faces[:, np.sum(nfaces[:(P1num - 1)]):np.sum(nfaces[:P1num])]
        P2 = faces[:, np.sum(nfaces[:(P2num - 1)]):np.sum(nfaces[:P2num])]
        P1 = P1 - avgFace[:, np.newaxis]  # mean-subtract each person's images
        P2 = P2 - avgFace[:, np.newaxis]
        PCAmodes = [5, 6]  # 1-based PCA modes to project onto
        mode_idx = np.array(PCAmodes) - 1  # zero-based column indices into U
        # U[:, mode_idx].T is (2, 32256); each product gives the (2, #images)
        # coordinates of one person's images in the chosen PCA plane.
        PCACoordsP1 = U[:, mode_idx].T @ P1
        PCACoordsP2 = U[:, mode_idx].T @ P2
        # Each column is a point in the (mode 5, mode 6) plane.
        plt.plot(PCACoordsP1[0, :], PCACoordsP1[1, :], 'd', color='k', label='Person 2')
        plt.plot(PCACoordsP2[0, :], PCACoordsP2[1, :], '^', color='r', label='Person 7')
        plt.legend()
        plt.show()

def main(params: Optional[Dict] = None) -> None:
    """Entry point: forward parsed CLI params to the eigenfaces demo.

    Args:
        params: settings dict (typically ``vars(parse_args())``); ``None``
            is treated as an empty dict (avoids a mutable default argument).
    """
    Chp01Sec06.startup(params=params or {})

def parse_args(argv=None) -> argparse.Namespace:
    """Parse command-line arguments.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (exposed mainly so tests can inject arguments).

    Returns:
        Namespace with ``run_mode`` (int, default 1).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--run_mode', action='store',
        type=int, default=1, dest='run_mode',
        help='run mode'
    )
    return parser.parse_args(argv)

if __name__ == '__main__':
    # Parse the CLI once and hand the options to main() as a plain dict.
    cli_args = parse_args()
    main(params=vars(cli_args))