"""Implements the Gaussian Mixture model, and trains using EM algorithm."""
import numpy as np
import scipy
from scipy.stats import multivariate_normal
import random

import matplotlib.pyplot as plt


class GaussianMixtureModel(object):
    """Gaussian Mixture Model trained with the EM algorithm."""

    def __init__(self, n_dims, n_components=1,
                 max_iter=10,
                 reg_covar=1e-6):
        """
        Args:
            n_dims: The dimension of the feature.
            n_components: Number of Gaussians in the GMM.
            max_iter: Number of steps to run EM.
            reg_covar: Amount to regularize the covariance matrix (i.e. added
                to the diagonal of each covariance matrix).
        """
        self._n_dims = n_dims
        self._n_components = n_components
        self._max_iter = max_iter
        self._reg_covar = reg_covar

        # Randomly initialize the means; fit() re-initializes them with
        # random points drawn from the dataset.
        self._mu = np.random.rand(self._n_components, self._n_dims)  # (n_components, n_dims)

        # Mixture weights: random positives normalized to sum to 1.
        temp = np.random.uniform(0, 1, (self._n_components, 1))
        self._pi = temp / np.sum(temp)  # (n_components, 1)

        # Covariances: large isotropic init so every component initially
        # covers the full range of the data.
        self._sigma = np.zeros((self._n_components, self._n_dims, self._n_dims))  # (n_components, n_dims, n_dims)
        for k in range(self._n_components):
            np.fill_diagonal(self._sigma[k], 1000)

    def fit(self, x):
        """Runs EM steps for max_iter iterations.

        Args:
            x(numpy.ndarray): Feature array of dimension (N, ndims).
        """
        # Re-initialize the means with distinct random points from the
        # dataset; this is a much better starting point than uniform noise.
        rand_idx = random.sample(range(x.shape[0]), k=self._n_components)
        self._mu = x[rand_idx]  # (n_components, n_dims)

        for _ in range(self._max_iter):
            z_ik = self._e_step(x)
            self._m_step(x, z_ik)

    def _e_step(self, x):
        """E step.

        Wraps around get_posterior.

        Args:
            x(numpy.ndarray): Feature array of dimension (N, ndims).
        Returns:
            z_ik(numpy.ndarray): Array containing the posterior probability
                of each example, dimension (N, n_components).
        """
        return self.get_posterior(x)

    def _m_step(self, x, z_ik):
        """M step, update the parameters pi, mu and sigma.

        Args:
            x(numpy.ndarray): Feature array of dimension (N, ndims).
            z_ik(numpy.ndarray): Array containing the posterior probability
                of each example, dimension (N, n_components).
                (Alternate way of representing categorical distribution of z_i)
        """
        # Effective number of points assigned to each component; floor it so
        # an empty/collapsed component cannot cause division by zero.
        z_k = np.sum(z_ik, axis=0)[:, np.newaxis]  # (K, 1)
        z_k_safe = np.maximum(z_k, np.finfo(float).tiny)

        # Mixture weights.
        self._pi = z_k / x.shape[0]

        # Means (responsibility-weighted average of the data).
        self._mu = np.dot(z_ik.T, x) / z_k_safe

        # Covariances. NOTE: the standard M step centers the data on the
        # *updated* means computed just above (the previous implementation
        # incorrectly used the stale means from the last iteration).
        reg = np.identity(self._n_dims) * self._reg_covar
        for k in range(self._n_components):
            x_minus_mu = x - self._mu[k]  # (N, ndims)
            weighted = z_ik[:, k][:, np.newaxis] * x_minus_mu
            # Regularize the diagonal to avoid a singular covariance matrix.
            self._sigma[k, :, :] = (np.dot(x_minus_mu.T, weighted)
                                    / z_k_safe[k, 0]) + reg

    def get_conditional(self, x):
        """Computes the conditional probability.

        p(x^(i)|z_ik=1)

        Args:
            x(numpy.ndarray): Feature array of dimension (N, ndims).
        Returns:
            ret(numpy.ndarray): The conditional probability for each example,
                dimension (N, n_components).
        """
        ret = []
        for k in range(self._n_components):
            ret.append(self._multivariate_gaussian(x, self._mu[k],
                                                   self._sigma[k]))
        return np.array(ret).T

    def get_marginals(self, x):
        """Computes the marginal probability.

        p(x^(i)|pi, mu, sigma) = sum_k pi_k * p(x^(i)|z_ik=1)

        Args:
             x(numpy.ndarray): Feature array of dimension (N, ndims).
        Returns:
            (1) The marginal probability for each example, dimension (N,).
        """
        # self._pi is (K, 1); transpose to (1, K) so it broadcasts against
        # the (N, K) conditional probabilities.
        return np.sum(self.get_conditional(x) * self._pi.T, axis=1)

    def get_posterior(self, x):
        """Computes the posterior probability (the responsibilities).

        p(z_{ik}=1|x^(i))

        Args:
            x(numpy.ndarray): Feature array of dimension (N, ndims).
        Returns:
            z_ik(numpy.ndarray): Array containing the posterior probability
                of each example, dimension (N, n_components).
        """
        upper = self.get_conditional(x) * self._pi.T  # (N, K)
        # Floor the denominator: the marginal can underflow to 0 for points
        # far from every component, which would otherwise yield NaNs.
        lower = np.maximum(self.get_marginals(x), np.finfo(float).tiny)
        return upper / lower[:, np.newaxis]

    def _multivariate_gaussian(self, x, mu_k, sigma_k):
        """Multivariate Gaussian density, implemented for you.

        Args:
            x(numpy.ndarray): Array containing the features of dimension
                (N, ndims)
            mu_k(numpy.ndarray): Array containing one single mean (ndims,)
            sigma_k(numpy.ndarray): Array containing one single covariance
                matrix (ndims, ndims)
        """
        return multivariate_normal.pdf(x, mu_k, sigma_k)

    def supervised_fit(self, x, y):
        """Assign each cluster with a label through counting.

        For each cluster, find the most common digit using the provided (x,y)
        and store it in self.cluster_label_map.
        self.cluster_label_map should be a list of length n_components,
        where each element maps to the most common digit in that cluster.
        (e.g. If self.cluster_label_map[0] = 9, then the most common digit
        in cluster 0 is 9.)

        Args:
            x(numpy.ndarray): Array containing the feature of dimension (N,
                ndims).
            y(numpy.ndarray): Array containing the label of dimension (N,)
        """
        self.fit(x)

        # Arbitrary fallback labels for clusters that end up empty below.
        self.cluster_label_map = random.sample(range(self._n_components),
                                               k=self._n_components)

        # Hard-assign each point to its most probable cluster.
        assign_idx = np.argmax(self.get_posterior(x), axis=1)

        # Map each non-empty cluster to the majority label among its points.
        for k in range(self._n_components):
            row_idx = np.where(assign_idx == k)[0]
            if len(row_idx) != 0:
                labels_unique, counts = np.unique(y[row_idx],
                                                  return_counts=True)
                self.cluster_label_map[k] = labels_unique[np.argmax(counts)]

    def supervised_predict(self, x):
        """Predict a label for each example in x.

        Finds the cluster assignment for each x, then uses
        self.cluster_label_map to map to the corresponding digit.

        Args:
            x(numpy.ndarray): Array containing the feature of dimension (N,
                ndims).
        Returns:
            y_hat(numpy.ndarray): Array containing the predicted label for each
            x, dimension (N,)
        """
        assign_idx = np.argmax(self.get_posterior(x), axis=1)
        return np.array([self.cluster_label_map[k] for k in assign_idx])