# gmm.py  -- Gaussian Mixture Model
# ======================================================
#
# Either Dirichlet Process  (dpgmm_model.py)
# or random number of fixed clusters with uniform weight (unigmm_model.py)
#
# In this example, we first generate a number of observations using the
# model. Then we divide the observations into 50-50 training and test sets.
# Using the training set of unlabeled points we estimate the number of
# clusters and the mean and variance of each cluster. These estimated values
# are evaluated by measuring the average likelihood of the test data.

from pyblog import *
from time import time
import pylab, math
import numpy
from optparse import OptionParser


def avg_loglikelihood(mean_precs, points):
  """
  Return the average log-likelihood of `points` under a Gaussian mixture.

  mean_precs: iterable of (mean, precision, weight) triples.
  points:     non-empty sequence of observations accepted by MNormal.
  """
  # first create an array of (distribution, weight) pairs
  dists = [(MNormal(m, p), wt) for (m, p, wt) in mean_precs]
  def point_log_likelihood(pt):
    ans = sum(wt * dist.prob(pt) for dist, wt in dists)
    if ans == 0:
      # every component probability underflowed to 0; approximate the
      # log of the sum by its largest term, computed in log space.
      # (The original code called min() on a single float with unbound
      # loop variables, which could never run successfully.)
      return max(math.log(wt) + dist.log_prob(pt) for dist, wt in dists)
    return math.log(ans)

  # then evaluate each point at each of the distributions
  return sum(point_log_likelihood(pt) for pt in points) / len(points)

def compute_cov_points(mean, prec):
  """
  Return (xc, yc) lists of floats tracing the 1-sigma ellipse of the
  Gaussian with the given mean and precision matrix, for visualization.
  """
  cov = numpy.linalg.inv(prec)
  # eigh: the covariance is symmetric, so its eigenvalues are real and
  # non-negative; plain eig could return complex values from round-off.
  v, w = numpy.linalg.eigh(cov)
  s1, e1 = math.sqrt(v[0]), w[:, 0]
  s2, e2 = math.sqrt(v[1]), w[:, 1]

  xc, yc = [], []
  # sample theta over the full [0, 2*pi] range, endpoint included, so the
  # plotted ellipse is closed (the old range() loop stopped short of 2*pi)
  for theta in numpy.linspace(0.0, 2.0 * math.pi, 64):
    p = mean + s1 * e1 * math.cos(theta) + s2 * e2 * math.sin(theta)
    xc.append(float(p[0]))
    yc.append(float(p[1]))

  return xc, yc

SCANCOUNTS = [1, 9, 90, 200, 400, 800]
def main():
  parser = OptionParser()
  parser.add_option("-m", "--model", dest="model", default="DP",
                    help="Model Type -- DP (Dirichlet Process), UNI (Poisson"\
                    +" number of components (default DP)")
  parser.add_option("-r", "--runs", dest="runs", default=int(1),
                    type = int,
                    help="Number of trajectories to average over (default 1)")
  parser.add_option("-n", "--numpoints", dest="numpoints", default=int(1000),
                    type = int,
                    help="Number of points (default 1000)")
  parser.add_option("-a", "--alpha", dest="alpha", default=10.0,
                    type = float,
                    help="DP concentration (default 10.0)")
  parser.add_option("-c", "--avgnumclust", dest="avgnumclust", default=int(40),
                    type = int,
                    help="Poisson avg. number of clusters (default 40)")
  parser.add_option("-q", "--quiet", action="store_true",
                    dest="quiet", default=False,
                    help="Don't display anything")
  parser.add_option("-v", "--visualize", action="store_true",
                    dest="visualize", default=False,
                    help="Visualize a sample trajectory in a scatter-plot")
  parser.add_option("-s", "--scans", dest="scans", default=int(4),
                    type = int,
                    help="number of different scans (default 4, max 6)")
  
  (options, args) = parser.parse_args()
  
  # generate some data

  if options.model.lower() == "dp":
    from dpgmm_model import generate, estimate
    print "DP-GMM: Generating %d points with conc %.2f" % \
          (options.numpoints, options.alpha)
    train, test, clusters = generate(options.alpha, options.numpoints)
    model_name = "DP-GMM"
  
  elif options.model.lower() == "uni":
    from unigmm_model import generate, estimate
    print "Uniform-GMM: Generating %d points with avg. %d clusters" % \
          (options.numpoints, options.avgnumclust)
    train, test, clusters = generate(options.alpha, options.numpoints)
    model_name = "Uniform-GMM"

  else:
    print "Unknown option for model:", options.model.lower()
    return
  
  test_like = avg_loglikelihood(clusters, test)
  print "Test Data avg. log-likelihood = %.2f" % test_like,
  print "# used clusters =", len(clusters)

  print "Running %d trajectories for each configuration:" % options.runs
  
  print "=" * 10
  print "Gibbs Sampling"
  print "=" * 10
  # Gibbs sampling
  configure("PARAM_GIBBS_PARTIAL_DISCRETE", True)
  if options.visualize:
    pylab.figure(1)
    sample_plot(estimate, train, test, options.scans)
  
  pts1 = inference(estimate, train, test, options.runs,
                   SCANCOUNTS[:options.scans])

  print "=" * 10
  print "Parent Sampling"
  print "=" * 10
  # Parent sampling
  configure("PARAM_GIBBS_CONJUGATE", False)
  configure("PARAM_GIBBS_PARTIAL_DISCRETE", False)
  if options.visualize:
    pylab.figure(2)
    sample_plot(estimate, train, test, options.scans)
  pts2 = inference(estimate, train, test, options.runs,
                   SCANCOUNTS[:options.scans])

  def coord(data, i): return [x[i] for x in data]
  
  # compare the performance
  pylab.figure(3)
  pylab.plot(coord(pts1, 0), coord(pts1, 1), 'b-',
             label="Gibbs Sampling")
  pylab.plot(coord(pts2, 0), coord(pts2, 1), 'r:',
             label="Parent Sampling")
  pylab.ylim(-20, test_like)
  pylab.xlim(0, min(pts1[-1][0], pts2[-1][0]))
  pylab.xlabel("Time (s)")
  pylab.ylabel("Test Average Log Likelihood")
  pylab.title("Convergence Rate On " + model_name)
  pylab.legend(loc="lower right")
  pylab.savefig(model_name+".eps", format="eps")
  pylab.savefig(model_name+".pdf", format="pdf")
  if not options.quiet:
    pylab.show()

def inference(estimate, train, test, numruns, scan_range):
  all_data = []
  for i in range(numruns):
    world = None
    tot_time = 0.0
    curr_data = []
    for scans in scan_range:
      clusters, world, curr_time = estimate(train, scans, world)
      test_like = avg_loglikelihood(clusters, test)
      tot_time += curr_time
      curr_data.append((tot_time, test_like))
      
    print "run %d" % i, curr_data
    all_data.append(curr_data)
    
  mean_data = []
  for data in zip(*all_data):
    mean_data.append((sum(x[0] for x in data)/len(data),
                      sum(x[1] for x in data)/len(data)))

  print "mean", mean_data
  return mean_data
  
  
def sample_plot(estimate, train, test, numsubplots):
  # compute the data min and max for drawing each subplot
  xmin, xmax = min(float(pt[0]) for pt in train+test), \
               max(float(pt[0]) for pt in train+test)
  ymin, ymax = min(float(pt[1]) for pt in train+test), \
               max(float(pt[1]) for pt in train+test)
  
  # estimate the mean and precision and verify this by
  # using the likelihood on the test data
  times, likelihoods = [], []
  total_time, total_scans = 0, 0
  world = None
  for subplotnum in range(numsubplots):
    curr_scans = SCANCOUNTS[subplotnum]
    total_scans += curr_scans
    print "Scans =", total_scans,
    clusters, world, curr_time = estimate(train, curr_scans, world)
    total_time += curr_time
    train_like = avg_loglikelihood(clusters, train)
    test_like = avg_loglikelihood(clusters, test)
    print "Time = %.2f" % total_time,
    print "Train = %.2f" % train_like,
    print "Test = %.2f" % test_like,
    print "# Clusters =", len(clusters)

    times.append(total_time)
    likelihoods.append(test_like)

    # create a subplot for this run
    pylab.subplot(2, numsubplots/2, subplotnum+1)
    pylab.title("scans=%d, avg. test likelihood=%.2f" % \
                (total_scans, test_like))
    
    # plot the data in a scatter plot
    pylab.scatter([ x[0,0] for x in train], [x[1,0] for x in train],
                  s=5, c='b', marker='x', edgecolor='b', label="x train")
    pylab.scatter([ x[0,0] for x in test], [x[1,0] for x in test],
                  s=5, c='r', marker='+', edgecolor='r', label="+ test")
    
    # plot the ellipses for each cluster's precision
    for mean, prec, wt in clusters:
      xc, yc = compute_cov_points(mean, prec)
      pylab.plot(xc, yc, 'g-', linewidth=max(1, int(wt*20)))
      #pylab.plot([float(mean[0])], [float(mean[1])], 'go')

    pylab.axis([xmin, xmax, ymin, ymax])
    #pylab.axis('equal')
    pylab.legend(loc="best")

  return times, likelihoods

if __name__ == "__main__":
  try:
    main()
  except SystemExit:
    raise
  except:
    # on any unexpected error, print the traceback and drop into the
    # post-mortem debugger before re-raising
    import pdb, traceback, sys
    traceback.print_exc(file=sys.stdout)
    # sys.exc_traceback is a deprecated global alias; exc_info() is the
    # supported (and thread-safe) way to get the current traceback
    pdb.post_mortem(sys.exc_info()[2])
    raise
