# lda.py
# Runs the LDA model on given data psych.txt

from pyblog import *
import math
from time import time

## The smoothed LDA model (Blei, Ng and Jordan, 2003)

@var_dist
def topic_wts(d):
  """Topic mixture for document d: symmetric Dirichlet, concentration ALPHA0."""
  return Dirichlet([float(ALPHA0) / MAXTOPICS] * MAXTOPICS)

@var_dist
def topic(d, w):
  """Topic assignment for token w of document d, drawn from the doc's mixture."""
  return Categorical(topic_wts(d))

@var_dist
def word(d, w):
  """Observed word for token w of document d, drawn from its topic's word weights."""
  return Categorical(word_wts(topic(d, w)))

@var_dist
def word_wts(t):
  """Word distribution for topic t: symmetric Dirichlet, concentration ETA0."""
  return Dirichlet([float(ETA0) / MAXWORDS] * MAXWORDS)

## end of model

def compute_perplexity(world, data):
  """
  Return the per-word perplexity of every word in `data` under `world`.

  Each word's probability is a mixture over topics: the document's topic
  weights times the topic's word weights.
  """
  # Pull the per-topic word distributions out of the world once, up front.
  topic_word_dists = [world[word_wts(t)] for t in range(MAXTOPICS)]

  total_log_like = 0.0
  n_words = 0
  for d, doc in enumerate(data):
    mix_wts = world[topic_wts(d)]
    for w in doc:
      # Mixture probability of word w in document d.
      p = sum(wt * topic_word_dists[t][w] for t, wt in enumerate(mix_wts))
      total_log_like += math.log(p)
      n_words += 1

  return math.exp(-total_log_like / n_words)

def _construct_obs(data):
  """Build the observation list: word(d, i) == w for every token of every doc."""
  return [word(d, i) == w
          for d, doc in enumerate(data)
          for i, w in enumerate(doc)]

def inference(train, test):
  times_perp = []
  tot_time = 0.0
  obs = _construct_obs(train)
  last_world = None
  for scans in [1, 2, 3, 10, 100]:
    inits = None
    if last_world is not None:
      inits = [rvar == value for rvar, value in last_world.iteritems()]
    t1 = time()
    last_world, = query([pyblog_world()], obs, inits, outtyp = QUERY_LAST,
                        burnin=0, scans=scans, stats=True)
    tot_time += time()-t1
    perp = compute_perplexity(last_world, test)
    print "Scans", scans, "Time", tot_time, "Test Perplexity", perp
    times_perp.append((tot_time, perp))

def load_data(filename):
  """
  Load the corpus from `filename`.

  The file's first line is a whitespace-separated list of 1-based word
  ids; the second line is the matching list of 1-based document ids.

  Returns a list of documents, each a list of 0-based word ids.
  Raises ValueError if the two lines have different lengths.
  """
  def line2ints(line):
    return [int(x) for x in line.split()]

  # Only the first two lines matter; a context manager guarantees the
  # file is closed even if parsing raises.
  with open(filename) as f:
    words = line2ints(f.readline())
    docs = line2ints(f.readline())

  # A real exception, not assert: asserts vanish under `python -O`.
  if len(words) != len(docs):
    raise ValueError("word and document id lines differ in length")

  numdocs = max(docs) if docs else 0  # max([]) would raise on an empty corpus
  data = [[] for _ in range(numdocs)]
  for d, w in zip(docs, words):
    data[d - 1].append(w - 1)  # shift ids from 1-based to 0-based
  return data

# Model hyperparameters.
MAXTOPICS = 50         # number of LDA topics
ALPHA0 = 50            # total Dirichlet concentration for per-document topic weights
MAXWORDS = 10000       # vocabulary size
ETA0 = .01 * MAXWORDS  # total concentration for per-topic word weights (.01 per word)

def main():
  """Load the psych corpus and run staged inference, evaluating on it too."""
  corpus = load_data("psych.txt")
  configure("PARAM_GIBBS_DISCRETE", True)
  # Train and test on the same documents.
  inference(corpus, corpus)
  
  
if __name__ == "__main__":
  try:
    main()
  except:
    # Debugging harness: on any failure, print the traceback, drop into
    # the post-mortem debugger, then re-raise so the process still fails.
    import pdb, traceback, sys
    traceback.print_exc(file=sys.stdout)
    # sys.exc_traceback is deprecated; sys.exc_info()[2] is the supported way
    # to get the current exception's traceback.
    pdb.post_mortem(sys.exc_info()[2])
    raise
  
