#!/usr/bin/env python3
# Author: Armit
# Create Time: 2022/11/19 

from argparse import ArgumentParser

from sklearnex import patch_sklearn ; patch_sklearn()
from sklearn.decomposition import *
from sklearn.manifold import *
import matplotlib.pylab as plt
import pandas as pd

from data import get_data, FEATURE_NUM, FEATURE_CAT, TARGET
from utils import get_cmap, CPU_COUNT, RAND_SEED

# Registry of dimensionality-reduction factories: name -> (dim -> unfitted estimator).
# Each value is a lambda so the estimator is only constructed once a method is chosen.
# NOTE(review): 'METHDOS' is a typo of 'METHODS', kept as-is because other code keys off this name.
METHDOS = {
  # --- sklearn.decomposition ---
  'pca':      lambda dim: PCA(n_components=dim, random_state=RAND_SEED),
  'kpca':     lambda dim: KernelPCA(n_components=dim, kernel='linear', random_state=RAND_SEED, n_jobs=CPU_COUNT),
  'kpca-p':   lambda dim: KernelPCA(n_components=dim, kernel='poly', random_state=RAND_SEED, n_jobs=CPU_COUNT),
  'kpca-r':   lambda dim: KernelPCA(n_components=dim, kernel='rbf', random_state=RAND_SEED, n_jobs=CPU_COUNT),
  'kpca-s':   lambda dim: KernelPCA(n_components=dim, kernel='sigmoid', random_state=RAND_SEED, n_jobs=CPU_COUNT),
  'ipca':     lambda dim: IncrementalPCA(n_components=dim, batch_size=100),
  'spca':     lambda dim: SparsePCA(n_components=dim, random_state=RAND_SEED, n_jobs=CPU_COUNT, verbose=True),
  'mb-spca':  lambda dim: MiniBatchSparsePCA(n_components=dim, random_state=RAND_SEED, n_jobs=CPU_COUNT, verbose=True),
  'dl':       lambda dim: DictionaryLearning(n_components=dim, random_state=RAND_SEED, n_jobs=CPU_COUNT, verbose=True),
  'mb-dl':    lambda dim: MiniBatchDictionaryLearning(n_components=dim, random_state=RAND_SEED, n_jobs=CPU_COUNT, verbose=True),
  'nmf':      lambda dim: NMF(n_components=dim, random_state=RAND_SEED, verbose=2),
  'mb-nmf':   lambda dim: MiniBatchNMF(n_components=dim, random_state=RAND_SEED, verbose=2),
  'fica':     lambda dim: FastICA(n_components=dim, random_state=RAND_SEED),
  'tsvd':     lambda dim: TruncatedSVD(n_components=dim, random_state=RAND_SEED),
  # this is NOT `sklearn.discriminant_analysis.LinearDiscriminantAnalysis`
  'lda':      lambda dim: LatentDirichletAllocation(n_components=dim, random_state=RAND_SEED, n_jobs=CPU_COUNT, verbose=2),
  # --- sklearn.manifold ---
  'tsne':     lambda dim: TSNE(n_components=dim, init='random', random_state=RAND_SEED, n_jobs=CPU_COUNT, verbose=2, square_distances=True),
  'tsne-pca': lambda dim: TSNE(n_components=dim, init='pca', random_state=RAND_SEED, n_jobs=CPU_COUNT, verbose=2, square_distances=True),
  'lle':      lambda dim: LocallyLinearEmbedding(n_components=dim, n_neighbors=5, random_state=RAND_SEED, n_jobs=CPU_COUNT),
  'im':       lambda dim: Isomap(n_components=dim, n_neighbors=5, n_jobs=CPU_COUNT),
  'mds':      lambda dim: MDS(n_components=dim, random_state=RAND_SEED, n_jobs=CPU_COUNT, verbose=2),
  'se':       lambda dim: SpectralEmbedding(n_components=dim, random_state=RAND_SEED, n_jobs=CPU_COUNT),
}


def _pca(X:pd.DataFrame, method:str='pca', dim=2, show_eigvec=False):
  '''Project `X` down to `dim` dimensions using the projector registered under `method`.

  Args:
    X: input feature matrix (rows are samples).
    method: key into METHDOS selecting the decomposition/manifold algorithm.
    dim: target dimensionality (2 or 3, to allow plotting).
    show_eigvec: for 'pca' only — also print the feature names and eigenvectors.

  Returns:
    The embedded data of shape (n_samples, dim). NOTE: `fit_transform` returns
    an ndarray, not a DataFrame — the previous `-> pd.DataFrame` annotation was wrong.
  '''
  projector = METHDOS[method](dim)
  X_hat = projector.fit_transform(X)

  if method == 'pca':
    if show_eigvec:
      print('features:', FEATURE_NUM)
      print('eigvec:')
      print(projector.components_)

    print(f'  explained_variance: {projector.explained_variance_}')
    print(f'  explained_variance_ratio: {projector.explained_variance_ratio_} => {sum(projector.explained_variance_ratio_):.3%}')
  elif method in ('tsne', 'tsne-pca'):
    # BUGFIX: 'tsne-pca' is also a TSNE instance and exposes kl_divergence_;
    # the old check `method == 'tsne'` silently skipped it.
    print(f'  kl_divergence: {projector.kl_divergence_}')

  return X_hat


def _display(X, Y, dim=3, title=''):
  '''Scatter-plot the embedded points `X` colored by labels `Y`.

  Args:
    X: embedded data of shape (n_samples, dim).
    Y: per-sample labels used for coloring.
    dim: 2 for a flat scatter, 3 for a 3-D scatter.
    title: figure suptitle.

  Raises:
    ValueError: if `dim` is neither 2 nor 3.
  '''
  plt.clf()
  if dim == 2:
    x_min, x_max = X[:, 0].min(), X[:, 0].max()
    y_min, y_max = X[:, 1].min(), X[:, 1].max()
    plt.scatter(X[:, 0], X[:, 1], s=1, c=Y, cmap=get_cmap(Y))
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
  elif dim == 3:
    ax = plt.axes(projection='3d')
    ax.scatter(X[:, 0], X[:, 1], zs=X[:, 2], zdir='z', s=1, c=Y, cmap=get_cmap(Y))
  else:
    # BUGFIX: the bare `raise ValueError` carried no diagnostic at all
    raise ValueError(f'dim must be 2 or 3, got {dim!r}')
  plt.suptitle(title)
  plt.tight_layout()
  plt.show()


def pca(args):
  '''Load the dataset, embed it with the CLI-selected method, and visualize the result.'''
  features, labels = get_data(limit=args.limit, features=FEATURE_NUM, target=args.target, sample_method='naive')

  embedded = _pca(features, method=args.method, dim=args.dim, show_eigvec=True)
  print('X_hat.shape:', embedded.shape)

  _display(embedded, labels, args.dim, title=args.target)


if __name__ == '__main__':
  # CLI entry point: pick a projection method, a coloring target and the output dimension.
  argp = ArgumentParser()
  argp.add_argument('-M', '--method', default='pca', choices=METHDOS.keys())
  argp.add_argument('-T', '--target', default=TARGET, choices=FEATURE_CAT)
  argp.add_argument('-D', '--dim', default=3, type=int, choices=[2, 3], help='projected dimension')
  argp.add_argument('-N', '--limit', default=20000, type=int, help='limit dataset size')
  pca(argp.parse_args())
