# ldagen.py
# Implements a fully Bayesian smoothed LDA model with symmetrical
# Dirichlet priors for the topic distributions and the word distribution
# per topic
#
# This file generates the data and then runs LDA on the generated data

from pyblog import *
import pylab, math
from time import time

## The smoothed LDA model (Blei, Ng and Jordan, 2003)

@var_dist
def topic_wts(d):
  """Per-document topic proportions: symmetric Dirichlet(ALPHA0/MAXTOPICS)."""
  concentration = float(ALPHA0) / MAXTOPICS
  return Dirichlet([concentration] * MAXTOPICS)

@var_dist
def topic(d, w):
  """Topic assignment for word slot w of document d."""
  wts = topic_wts(d)
  return Categorical(wts)

@var_dist
def word(d, w):
  """Word at slot w of document d, drawn from its assigned topic's word distribution."""
  t = topic(d, w)
  return Categorical(word_wts(t))

@var_dist
def word_wts(t):
  """Per-topic word distribution: symmetric Dirichlet(ETA0/MAXWORDS)."""
  concentration = float(ETA0) / MAXWORDS
  return Dirichlet([concentration] * MAXWORDS)

## end of model

## additional variables for generating data
@var_dist
def doclen(d):
  """Length (word count) of document d: Poisson with mean AVGWORDS."""
  return Poisson(AVGWORDS)

@var
def corpus(numdocs):
  """The full corpus: numdocs documents, each a list of its word variables."""
  docs = []
  for d in range(numdocs):
    docs.append([word(d, w) for w in range(doclen(d))])
  return docs

def compute_perplexity(world, data):
  """
  Computes the perplexity of all the words in a world.

  Each word's probability is evaluated as a mixture over the document's
  topic distribution; perplexity is exp(-mean log-likelihood).
  """
  # Look up each topic's word distribution in this world once, up front.
  word_dist = [world[word_wts(t)] for t in range(MAXTOPICS)]

  log_like = 0.0
  cnt = 0
  for d, doc in enumerate(data):
    topic_dist = world[topic_wts(d)]
    for w in doc:
      # Word probability as a mixture over topics.
      word_prob = sum(t_prob * word_dist[t][w]
                      for t, t_prob in enumerate(topic_dist))
      log_like += math.log(word_prob)
      cnt += 1

  return math.exp(-log_like / cnt)

def generate(numdocs):
  """
  This will generate "numdocs" documents each with a random number of words
  from the LDA model. The LDA hyper parameters must be initialized before
  this call.

  Returns the pair (corpus, world) produced by query: no observations,
  no burnin, zero scans -- i.e. a single forward sample from the prior.
  """
  return query([corpus(numdocs), pyblog_world()], [],
               burnin=0, scans=0, outtyp=QUERY_LAST, stats=False, trace=False)

def _construct_obs(data):
  """Build the observation list: one `word(d, i) == w` constraint per word slot."""
  return [word(d, i) == w
          for d, doc in enumerate(data)
          for i, w in enumerate(doc)]

def inference_once(train, test):
  """
  Run one inference trajectory with increasing scan budgets (1, 10, 100,
  1000 scans), recording cumulative sampling time and test perplexity
  after each budget.

  train/test: parallel lists of documents (lists of observed words).
  Returns a list of (total_time, perplexity) pairs, one per budget.
  """
  times_perp = []
  tot_time = 0.0
  obs = _construct_obs(train)
  last_world = None
  for scans in [1, 10, 100, 1000]:
    inits = None
    if last_world is not None:
      # Warm-start from the final state of the previous round, so the
      # successive budgets together form one continuous chain.
      inits = [rvar == value for rvar, value in last_world.iteritems()]
    statsobj={}
    last_world, = query([pyblog_world()], obs, inits, outtyp = QUERY_LAST,
                        burnin=0, scans=scans, stats=False, statsobj=statsobj)
    # statsobj is filled in by query; 'scan-time' is the sampling wall time.
    tot_time += statsobj['scan-time']
    perp = compute_perplexity(last_world, test)
    print "Scans", scans, "Time", tot_time, "Test Perplexity", perp
    times_perp.append((tot_time, perp))
  
  return times_perp

def inference(train, test, numruns):
  trajectories = []
  for i in range(numruns):
    trajectories.append(inference_once(train, test))

  mean_traj = []

  for time_slice in zip(*trajectories):
    mean_traj.append( (sum(x[0] for x in time_slice) / len(time_slice),
                       sum(x[1] for x in time_slice) / len(time_slice)) )

  print "Mean Trajectory:", mean_traj
  return mean_traj

# Problem size; all hyperparameters below are scaled from it.
SIZE = 10
# ALPHA0 / ETA0: total Dirichlet concentrations for the topic and word
# priors (each divided by MAXTOPICS / MAXWORDS inside the model).
ALPHA0, MAXTOPICS, ETA0, MAXWORDS, AVGWORDS = \
  SIZE/2.0, SIZE, SIZE*5.0, SIZE*100, SIZE*10
NUMDOCS = SIZE  # number of documents to generate
NUMRUNS = 10    # independent inference runs averaged in inference()

def main():
  # generate some documents
  data, world = generate(NUMDOCS)

  print "LDA: DOCS=", NUMDOCS, "AVGWORDS=", AVGWORDS, "MAXTOPICS=", \
        MAXTOPICS, "ALPAH0=", ALPHA0, "MAXWORDS=", MAXWORDS, "ETA0=", ETA0
  
  # divide the data into training data and test data
  train, test = [], []
  for doc in data:
    train.append(doc[:len(doc)/2])
    test.append(doc[len(doc)/2:])
  
  print "Data perplexity: (train, test)", (compute_perplexity(world, train),
                                           compute_perplexity(world, test))
  print "Considering mean of %d runs" % NUMRUNS

  print "=" * 10
  print "Sampling from prior"
  print "=" * 10
  
  configure("PARAM_GIBBS_CONJUGATE", False)
  configure("PARAM_GIBBS_DISCRETE", False)
  configure("PARAM_COLLAPSED_SAMPLING", False)
  
  perp1 = inference(train, test, NUMRUNS)
  
  print "=" * 10
  print "Gibbs sampling"
  print "=" * 10
  
  configure("PARAM_GIBBS_CONJUGATE", True)
  configure("PARAM_GIBBS_DISCRETE", True)
  
  perp2 = inference(train, test, NUMRUNS)

  def coord(arr, i): return [x[i] for x in arr]
  
  pylab.plot(coord(perp1, 0), coord(perp1, 1), 'r:', label="Parent Sampling")
  pylab.plot(coord(perp2, 0), coord(perp2, 1), 'b-', label="Gibbs Sampling")
  pylab.xlabel("Time (s)")
  pylab.ylabel("Test Perplexity")
  pylab.title("Convergence Rate On LDA")
  pylab.legend(loc="upper right")
  pylab.savefig("ldagen.eps", format="eps")
  pylab.show()
  
  # Collapsed sampling needs fixing!
  #
  # print "Gibbs + collapsed"
  # configure("PARAM_COLLAPSED_SAMPLING", True)
  # times_perp = inference(train, test, NUMRUNS)


if __name__ == "__main__":
  try:
    main()
  except:
    import pdb, traceback, sys
    traceback.print_exc(file=sys.stdout)
    pdb.post_mortem(sys.exc_traceback)
    raise

  
  
