# Test the dirichlet and categorical distributions

import math
from time import time
from pyblog import *
from testmain import MyTestCase, testmodule
from random import Random
from pprint import pprint

class test_dirichlet_dist(MyTestCase):
  def testmeanvar(self):
    dir = Dirichlet([1,3,5,4])
    rand = Random()
    sx, sx2 = 0.0, 0.0
    N = 10000
    for i in range(N):
      x = dir.sample(rand)[1]
      sx += x
      sx2 += x**2
    mean = sx / N
    var = sx2 / N - mean ** 2

    exp_mean = 3.0 / 13.0
    exp_var = 3.0 * (13.0 - 3.0) / ( 13.0 ** 2 * (13.0 + 1))
    
    if dispstats:
      print "Actual mean, var:", mean, var
      print "Expected mean, var:", exp_mean, exp_var

    self.assertPctEqual(mean, exp_mean, 3, "mean")
    self.assertPctEqual(var, exp_var, 3, "variance")

  def testprob2(self):
    dir = Dirichlet([1,3])
    N = 10000
    totalprob = 0
    for i in range(N+1):
      p1 = float(i)/N
      p2 = 1 - p1
      totalprob += dir.prob([p1, p2]) / N
      
    if dispstats:
      print "TotalProb:", totalprob

    self.assertPctEqual(totalprob, 1.0, 10, "total probability")

  def testprob3(self):
    dir = Dirichlet([1, 3, 5])
    N = 100
    totalprob = 0
    for i1 in range(N+1):
      p1 = float(i1)/N
      for i2 in range(N+1-i1):
        p2 = float(i2)/N
        p3 = 1 - p1 - p2
        totalprob += dir.prob([p1, p2, p3]) * (1.0/ N) * (1.0 / N)
      
    if dispstats:
      print "TotalProb:", totalprob

    self.assertPctEqual(totalprob, 1.0, 10, "total probability")

class test_categorical_dist(MyTestCase):
  def testsample(self):
    prob = [.1,.4, .3, .2]              # true probabilities
    obs = [0, 0, 0, 0]                  # observations
    cat = Categorical(prob)
    rand = Random()
    N = 10000
    for i in range(N):
      obs[cat.sample(rand)] += 1
    obs = [float(x) / N for x in obs]   # normalize

    if dispstats:
      print
      print "True:", prob
      print "Obs:", obs

    # check the max discrepancy
    self.assertAbsEqual(max(abs(x-y) for x,y in zip(prob, obs)), 0.0, .01,
                        "absolute difference")

  def testprob(self):
    prob = [.1,.4, .3, .2]              # true probabilities
    cat = Categorical(prob)
    obs = []
    for i in range(len(prob)):
      obs.append(cat.prob(i))

    if dispstats:
      print
      print "True:", prob
      print "Obs:", obs

    # check the max discrepancy
    self.assertAbsEqual(max(abs(x-y) for x,y in zip(prob, obs)), 0.0, .01,
                        "absolute difference")

class testConjugacy(MyTestCase):
  def runTest(self):
    from pyblog.misc.poly import Polynomial
    x = Polynomial([.1,.2,.3])
    s0 = Categorical.SufficientStats()
    s0.add(0)
    l0 = Categorical(x)(s0)
    s1 = Categorical.SufficientStats()
    s1.add(1)
    l1 = Categorical(x)(s1)
    s2 = Categorical.SufficientStats()
    s2.add(2)
    l2 = Categorical(x)(s2)

    like = (l0 * l2) * (l2 * l0) * (l0 * l1) * (l1 * l0) \
           * (l1 * l2) * (l2 * l1)

    like = like * 2
    like = 2 * like

    post = like * Dirichlet([.5,.5,.5])
    
    if dispstats:
      print
      print "Likelihood:", str(like)
      print "Posterior:", post

    self.assertTrue(like.get_dirichlet_weight_dict() \
                    == {0:4, 1:4, 2:4}, "likelihood")
    self.assertTrue(post.get_dirichlet_weight_vec() == [4.5, 4.5, 4.5],
                    "posterior")
      

class testprop(MyTestCase):
  """
  Ensures that the user proposers and the automatic inference give identical
  answers
  """
  def runTest(self):
    @var_dist
    def theta(): return Dirichlet([1,3,2])
    @var_dist
    def beta(t): return Dirichlet([4, 1, 5, 6, 7, 8])
    @var_dist
    def z(i): return Categorical(theta())
    @var_dist
    def w(i): return Categorical(beta(z(i)))
    
    obs = [w(0)==0, w(1)==5, w(2)==3]

    seed = int(time())
    ans = query([z(0), z(1), z(2)], obs, stats = dispstats, scans=10,
                trace = disptrace, deterministic=True,
                seed=seed, outtyp = QUERY_LAST)
    if dispstats:
      print "Ans"
      print [x for x in ans]
      print "Rerunning with user proposer"
    
    @gibbs_simple(beta)
    @gibbs_simple(theta)
    def theta_proposer(rand, dist, val, *childvals):
      # copy the parent dirichlet weights
      wts = [x for x in dist.get_dirichlet_weight_vec()]
      for c in childvals:
        wts[c] += 1
      return Dirichlet(wts).sample(rand)

    ans2 = query([z(0), z(1), z(2)], obs, stats = dispstats,scans=10,
                 trace=disptrace, deterministic = True,
                 seed=seed, outtyp = QUERY_LAST)
    if dispstats:
      print "(Gibbs) Ans"
      print [x for x in ans2]

    self.assertTrue(sum(abs(x1 - x2) for x1, x2 in zip(ans, ans2)) == 0,
                    "Answers differ: automatic =" + str(ans) \
                    + "user proposed" + str(ans2))

class testCollapsedLDA(MyTestCase):
  """Smoke test: one scan of collapsed Gibbs sampling on a tiny LDA model
  with k=4 topics and a V=10 word vocabulary."""

  def runTest(self):

    @var_dist
    def theta(d): return Dirichlet([alpha0/k for _ in range(k)])

    @var_dist
    def z(d, i): return Categorical(theta(d))

    @var_dist
    def w(d, i): return Categorical(beta(z(d,i)))

    @var_dist
    def beta(t): return Dirichlet([eta0/V for _ in range(V)])

    k, V = 4, 10                       # k - num topics, V - num words
    alpha0 = k
    eta0 = V

    configure("PARAM_GIBBS_CONJUGATE", True)
    configure("PARAM_GIBBS_DISCRETE", True)
    configure("PARAM_COLLAPSED_SAMPLING", True)

    # four documents; each inner list is the word ids of one document
    data = [[1,1,1,2,2,2], [3,3,3,3,6,6,6,6,8,8,8,8,8,9], [0,0,0,0,1,1,1],
            [3,3,3,4,4,4,4,4,5,5,5,5]]

    # BUG FIX: the observation list used `w(d,i) = v`, which is a
    # SyntaxError inside a comprehension; evidence is stated with `==`
    # as in the other tests in this file.
    query([theta(d) for d in range(len(data))],
          [w(d,i) == v for d,doc in enumerate(data) for i,v in enumerate(doc)],
          burnin=0, scans=1, stats=False)
  
class testLDA(MyTestCase):
  """Full LDA round trip: sample a synthetic corpus from the prior,
  then infer topics back from the word observations -- first with
  automatic inference, then with user-provided Gibbs proposers -- and
  check the resulting perplexity against that of the generating world."""

  def runTest(self):

    # Model: theta(d) is document d's topic mixture, z(d,i) the topic of
    # word i in document d, w(d,i) the word itself, beta(t) topic t's
    # word distribution.
    @var_dist
    def theta(d): return Dirichlet([alpha0/k for _ in range(k)])

    @var_dist
    def z(d, i): return Categorical(theta(d))

    @var_dist
    def w(d, i): return Categorical(beta(z(d,i)))

    @var_dist
    def beta(t): return Dirichlet([eta0/V for _ in range(V)])

    k, V = 4, 1000                     # k - num topics, V - num words
    alpha0 = 4.0
    eta0 = 1000.0

    # an m-document, n-words-per-document corpus of word variables
    @var
    def corpus(m, n): return [[w(d,i) for i in range(n)] for d in range(m)]

    def compute_perplexity(world):
      # Perplexity = exp(-average per-word log likelihood) under the
      # topic assignments and word distributions stored in `world`.
      ll = 0.0
      cnt = 0
      for rvar, val in world.iteritems():
        # we will compute the likelihood of each w(.,.) random variable
        if rvar.fn_name() == w:
          topic = world[z(*rvar.fn_args())]
          ll += math.log(world[beta(topic)][val])
          cnt += 1
      return math.exp( - ll / cnt)

    # generate data
    #M, N = 50, 100                      # M docs, N words in each doc
    M, N = 4, 25
    # NOTE: this rebinds the name `corpus` from the @var function above
    # to the sampled corpus; burnin=0, scans=0 yields a prior sample.
    corpus, world = query([corpus(M, N), pyblog_world()], [],
                          stats=dispstats,
                          outtyp=QUERY_LAST, burnin=0, scans=0)

    perp = compute_perplexity(world)

    def topwords(worddist, n=10):
      # Indices of the n largest entries of `worddist`, best first.
      # Extracted entries are overwritten with None; under Python 2
      # None compares less than any number, so max() skips them.
      output = []
      copydist = [x for x in worddist]
      for i in range(n):
        best = max(copydist)
        pos = copydist.index(best)
        output.append(pos)
        copydist[pos] = None
      return output

    if disptrace:
      print "CORPUS:"
      for x in corpus:
        print x
      print "TOPIC DIST:"
      for d in range(M):
        print ["%.2f" % x for x in world[theta(d)]]
      print "WORD DIST PER TOPIC"
      for t in range(k):
        print ["%.2f" % x for x in world[beta(t)]]

    if dispstats:
      for t in range(k):
        print "Topic %d" %t, topwords(world[beta(t)])
      print "Perplexity: ", perp

    # compute the observation: evidence statements for every sampled word
    obs = [rvar == value for rvar, value in world.iteritems() \
           if rvar.fn_name() == w]

    # intially assign each word to a random topic
    myrand = Random()
    inits = []
    for d in range(M):
      for i in range(N):
        t = myrand.randrange(k)
        inits.append(z(d,i) == t)

    # same seed is reused for the proposer-based run below
    seed = int(time())

    # first inference run: fully automatic proposals
    world2, = query([pyblog_world()], obs, inits,
                    seed = seed, trace=disptrace,
                    stats=dispstats, outtyp=QUERY_LAST, burnin=0,
                    scans=10)
    perp2 = compute_perplexity(world2)

    if disptrace:
      print "TOPIC DIST:"
      for d in range(M):
        print ["%.2f" % x for x in world2[theta(d)]]
      print "WORD DIST PER TOPIC"
      for t in range(k):
        print ["%.2f" % x for x in world2[beta(t)]]

    if dispstats:
      for t in range(k):
        print "Topic %d" %t, topwords(world2[beta(t)])
      print "Perplexity: ", perp2

    # inferred model must be within 20% of the generating perplexity
    self.assertTrue(perp2 < perp * 1.20, "perplexity %f vs true %f" \
                    % (perp2, perp) )


    if dispstats:
      print "RERUNNING WITH USER PROVIDED GIBBS PROPOSERS"

    # now, we'll re-run the above test with a Gibbs proposer for beta
    @gibbs_simple(beta)
    @gibbs_simple(theta)
    def dir_cat_proposer(rand, dist, val, *childvals):
      # copy the parent dirichlet weights
      wts = [x for x in dist.get_dirichlet_weight_vec()]
      for c in childvals:
        wts[c] += 1
      return Dirichlet(wts).sample(rand)

    # switching proposer for z: weight each topic by prior times word
    # likelihood, computed in log space for numerical stability
    @gibbs_switching(z)
    def z_proposer(rand, dist, val, w_val):
      wts = [dist.log_prob(t) + math.log(beta(t)[w_val]) \
             for t in dist.finite_support()]
      # bring the weights close to 1
      maxwt = max(wts)
      wts = [math.exp(x - maxwt) for x in wts]
      # normalize weights
      wts = [x/sumwt for x in wts]
      sumwt = sum(wts)
      wts = [x/sumwt for x in wts]
      return Categorical(wts).sample(rand)

    # second inference run: same seed and inits, user proposers active
    world3, = query([pyblog_world()], obs, inits,
                    seed = seed, trace = disptrace,
                    stats=dispstats, outtyp=QUERY_LAST, burnin=0,
                    scans=10)
    perp3 = compute_perplexity(world3)

    if disptrace:
      print "TOPIC DIST:"
      for d in range(M):
        print ["%.2f" % x for x in world3[theta(d)]]
      print "WORD DIST PER TOPIC"
      for t in range(k):
        print ["%.2f" % x for x in world3[beta(t)]]

    if dispstats:
      for t in range(k):
        print "Topic %d" %t, topwords(world3[beta(t)])
      print "Perplexity: ", perp3

    # proposer-based run must also be within 20% of the true perplexity
    self.assertTrue(perp3 < perp * 1.20, "perplexity %f vs true %f" \
                    % (perp3, perp) )

      
if __name__ == "__main__":
  # run every test case defined in this module
  import testdirichlet
  testmodule(testdirichlet)
