# Tests for mixture models with switching (component-indicator) variables.
import math

from pyblog.misc.util import Counter

from pyblog import *
from testmain import testmodule, MyTestCase, gauss_kl_diver_wt, gauss_kl_diver

class test1(MyTestCase):
  """
  A normal mixture model with two components. We use the model to generate
  the mean and the data. Then we try to estimate the mean from the data.

  Pass criterion: the KL divergence between the true two-component mixture
  and the inferred posterior over (mu(0), mu(1)) is below .25.
  """
  def runTest(self):
    # Component indicator for data point i (1 with probability .3).
    @var_dist
    def c(i): return Bernoulli(.3)

    # Mean of component k.
    # NOTE(review): the second Normal argument (.001) is presumably a
    # precision (i.e. a broad prior) rather than a variance -- confirm
    # against pyblog's Normal parameterization.
    @var_dist
    def mu(k): return Normal(0, .001)
    
    # Observation i: centered on its component's mean.
    @var_dist
    def y(i): return Normal(mu(c(i)), 1)

    # generate the means and the data by running the model forward
    # (no observations; a single scan, so each result holds one sample
    # and avg() recovers the sampled value)
    N = 30
    ans = query([mu(0), mu(1)] + [y(i) for i in range(N)] \
                + [c(i) for i in range(N)], [],
                burnin=0, scans=1, stats=False)
    means = [ans[0].avg(), ans[1].avg()]
    data = [x.avg() for x in ans[2:N+2]]
    assign = [int(x.avg()) for x in ans[N+2:2*N+2]]
    # Empirical mean of the generated points within each component
    # (display only).  NOTE(review): raises ZeroDivisionError in the rare
    # event that a component received no points.
    sample_means = [sum(data[i] for i in range(N) if assign[i]==j) \
                    /sum(1.0 for i in range(N) if assign[i]==j) \
                    for j in range(2)]
    
    if dispstats:
      print "mu:", means
      print "data:", data
      print "sample mu:", sample_means
      
    # Condition on the generated data points.
    obs = [y(i) == x for i,x in enumerate(data)]

    # now query the posterior over the two component means
    pos_mean_0, pos_mean_1 = query([mu(0), mu(1)], obs, burnin=10,
                                   scans=100, trace=disptrace,
                                   stats=dispstats)

    # compute the divergence between the true density and the posterior
    # density.  Each ((x,1), w) entry is a unit-spread Gaussian component
    # at x with mixture weight w; the two posterior marginals are pooled
    # with weight .5 each.
    div = gauss_kl_diver_wt([((x,1), 1.0/len(means)) for x in means],
                          [((x,1), .5*wt) for x,wt in pos_mean_0.iteritems()]\
                        + [((x,1), .5*wt) for x,wt in pos_mean_1.iteritems()])
    
    if dispstats:
      print "POSTERIOR: mu(0)", pos_mean_0.avg(), "mu(1)", pos_mean_1.avg()
      print "KL Divergence:", div

    self.assertTrue(div < .25,
                    "KL Divergence %f between (%f, %f) and (%f, %f)" \
                    % (div, means[0], means[1], pos_mean_0.avg(),
                       pos_mean_1.avg()))

class test2(MyTestCase):
  """
  Sanity check on the component indicator: generate a single data point
  from the model, observe it, and verify that the posterior of c(0) stays
  near its Bernoulli(.3) prior mean (a lone symmetric observation carries
  no information about which component produced it).
  """
  def runTest(self):
    # Component indicator (1 with probability .3).
    @var_dist
    def c(i): return Bernoulli(.3)
    
    # Component means with a broad prior.
    @var_dist
    def mu(k): return Normal(0, .001)
    
    # Observation, centered on its component's mean.
    @var_dist
    def y(i): return Normal(mu(c(i)), 1)

    # generate a single data point (scans=0, QUERY_LAST: take the value
    # from the initial forward sample)
    data, = query([y(0)], [], burnin=0, scans=0, stats=False,
                  outtyp = QUERY_LAST)
    # now infer the posterior distribution of c (it shouldn't really change!)
    pos_c, = query([c(0)], [y(0)==data], burnin=100, scans=10000,
                   stats=dispstats, trace=disptrace)
    if dispstats:
      print "y(0)==", data, "pos c:", pos_c.avg(), pos_c.std_dev()

    # NOTE(review): uses mean() here but avg() above -- presumably
    # synonyms on the result object; confirm in pyblog.
    self.assertPctEqual(pos_c.mean(), .3, 20, "c")
    
class test3(MyTestCase):
  """
  A mixture model with an unknown number of components: n() ~ Poisson(9)+1
  components, each of L=100 points assigned uniformly among them.  The
  model is run forward to generate data, then the posterior over the SET
  of active component means is inferred and compared to the true set via
  a weighted Gaussian KL divergence (must be < .5).
  """
  def runTest(self):
    # Number of components (Poisson shifted so there is at least 1).
    @var_dist
    def n(): return Poisson(9)+1
    
    # Component assignment of data point i, uniform over the n() components.
    @var_dist
    def c(i): return UniformInt(0, n())
    
    # Mean of component k, broad prior.
    @var_dist
    def mu(k): return Normal(0, .001)
    
    # Observation i, centered on its component's mean.
    @var_dist
    def y(i): return Normal(mu(c(i)), 1)

    # Derived variable: the full data vector.  L is late-bound by the
    # closure -- it is assigned below, before any of these are queried.
    @var
    def all_y(): return tuple([y(i) for i in range(L)])

    # Derived variable: the set of means of components actually in use.
    @var
    def all_mu(): return tuple(set([mu(c(i)) for i in range(L)]))
    
    # generate some points
    L = 100
    ans = query([all_y(), all_mu()], [], stats=False, burnin=0, scans=1)
    # scans=1 => each result distribution has a single key: the sample.
    data = ans[0].keys()[0]
    true_mu = list(ans[1].keys()[0])
    true_mu.sort()

    if dispstats:
      print "mu=", true_mu
      print "data=", data
      
    # now query the posterior over the active-means set given the data
    ans = query([all_mu()], [y(i) == x for i,x in enumerate(data)],
                stats=dispstats, trace=disptrace,
                burnin=50, scans=200)

    # Flatten the posterior over mean-sets into one weighted mixture of
    # unit-spread Gaussians: each sampled set contributes its members
    # with the set's probability split evenly among them.
    diver = gauss_kl_diver_wt([((x,1), 1.0/len(true_mu)) for x in true_mu],
                              reduce(list.__add__, 
                              [[((x,1), prob/len(pos_mu)) for x in pos_mu] \
                               for pos_mu, prob in ans[0].iteritems()]))

    # Most probable sampled set of means, for diagnostics.
    mode_mu = list(ans[0].mode())
    mode_mu.sort()
    
    if dispstats:
      print "True num components:", len(true_mu)
      print "Expected num components:", sum(len(x)*p \
                                            for x,p in ans[0].iteritems())
      print "Mode num components:",\
            Counter((len(x), y) for x,y in ans[0].iteritems()).argmax()
      print "KL divergence:", diver
      print "Mode: mu", mode_mu
      print "Mode KL divergence:", gauss_kl_diver([(x,1) for x in true_mu],
                                                  [(x,1) for x in mode_mu])
      
    self.assertTrue(diver < .5, "KL Divergence %f, True=%s Mode=%s" \
                    % (diver, str(true_mu), str(mode_mu)))

class testGibbsDiscrete(MyTestCase):
  """
  Test the Gibbs proposer for variables with finite values.

  Same setup and pass criterion as test2 (posterior of c(0) stays near
  its .3 prior), but with the PARAM_GIBBS_DISCRETE proposer enabled
  during inference.
  """
  def runTest(self):
    # Component indicator (1 with probability .3).
    @var_dist
    def c(i): return Bernoulli(.3)
    
    # Component means with a broad prior.
    @var_dist
    def mu(k): return Normal(0, .001)
    
    # Observation, centered on its component's mean.
    @var_dist
    def y(i): return Normal(mu(c(i)), 1)

    # generate a single data point from the initial forward sample
    data, = query([y(0)], [], burnin=0, scans=0, stats=False,
                  outtyp = QUERY_LAST)
    # now infer the posterior distribution of c (it shouldn't really change!)
    # Gibbs proposer enabled only around this query; restored afterwards
    # so later tests are unaffected.
    configure("PARAM_GIBBS_DISCRETE", True)    
    pos_c, = query([c(0)], [y(0)==data], burnin=100, scans=10000,
                   stats=dispstats, trace=disptrace)
    configure("PARAM_GIBBS_DISCRETE", False)
    if dispstats:
      print "y(0)==", data, "pos c:", pos_c.avg(), pos_c.std_dev()

    self.assertPctEqual(pos_c.mean(), .3, 20, "c")

class testGibbsDiscreteCollapsed(MyTestCase):
  """
  Test the Gibbs proposer for variables with finite values and with
  collapsing of parent variables.

  Uses a Dirichlet/Categorical model so the theta parents can be
  collapsed; the posterior of c(0) given a single categorical
  observation should stay near its .3 prior (within 10%).
  """
  def runTest(self):
    # Component indicator (1 with probability .3).
    @var_dist
    def c(i): return Bernoulli(.3)
    
    # Per-component categorical weights over 100 outcomes, sparse
    # Dirichlet prior -- these are the parents that get collapsed out.
    @var_dist
    def theta(k): return Dirichlet([.01 for i in range(100)])
    
    # Categorical observation drawn from its component's weights.
    @var_dist
    def y(i): return Categorical(theta(c(i)))

    # now infer the posterior distribution of c (it shouldn't really change!)
    # Both flags enabled only around this query; restored afterwards so
    # later tests are unaffected.
    configure("PARAM_GIBBS_DISCRETE", True)
    configure("PARAM_COLLAPSED_SAMPLING", True)
    pos_c, = query([c(0)], [y(0)==0], burnin=100, scans=1000,
                   stats=dispstats, trace=disptrace)
    configure("PARAM_COLLAPSED_SAMPLING", False)
    configure("PARAM_GIBBS_DISCRETE", False)
    if dispstats:
      print "pos c:", pos_c.avg(), pos_c.std_dev()

    self.assertPctEqual(pos_c.mean(), .3, 10, "c")

class testGibbsPartial(MyTestCase):
  """
  Test the Gibbs proposer for variables with partially instantiated
  finite values.

  A Dirichlet-process-style clustering model (InfDirichlet /
  InfCategorical).  Inference recovers, for every pair of data points,
  the posterior probability that they share a cluster; the worst
  absolute error against the known true pairing must be under .5.
  """
  def runTest(self):
    # Infinite stick-breaking weights with concentration 10.
    @var_dist
    def wts(): return InfDirichlet(10)
    
    # Cluster assignment of point i, drawn from the infinite weights.
    @var_dist
    def c(i): return InfCategorical(wts())
    
    # Cluster means with a very broad prior, so clusters are well
    # separated when sampled forward.
    @var_dist
    def mu(k): return Normal(0, .00001)
    
    # Observation i, centered on its cluster's mean.
    @var_dist
    def y(i): return Normal(mu(c(i)), 1)
    
    # 1 if points i and j are in the same cluster, else 0.
    @var
    def samecluster(i,j): return int(c(i)==c(j))
    
    # generate some data
    N = 10
    # The hard-coded data/clus below were produced once by the forward
    # query kept (commented) here, so the test is deterministic:
    ## ans = query([samecluster(i,j) for i in range(N) for j in range(i)] \
    ##             + [y(i) for i in range(N)], [],
    ##             burnin=0, scans=0, stats=False,
    ##             outtyp = QUERY_LAST)
    ## 
    ## true_pairs = ans[:N*(N-1)/2]
    ## data = ans[N*(N-1)/2:]

    data = [40.689126421931244, -398.67199421755498, -239.16072711075162,
            -139.07728527438118, -239.81412429915443, -238.15103834564351,
            -21.725826086457303, 524.84963646187828, -136.8838341256741,
            -240.52433550058555]
    clus = [0, 1, 2, 3, 2, 2, 4, 5, 3, 2]
    # Ground-truth same-cluster indicator for each pair (i, j<i), in the
    # same order as the samecluster queries below.
    true_pairs = []
    for i in range(N):
      for j in range(i):
        if clus[i] == clus[j]:
          true_pairs.append(1.0)
        else:
          true_pairs.append(0.0)

    # run once without Gibbs sampling just to ensure that the default
    # M-H proposer doesn't raise any exceptions
    configure("PARAM_GIBBS_PARTIAL_DISCRETE", False)
    guess_pairs = query([samecluster(i,j) for i in range(N) for j in range(i)],
                        [y(i)==d for i,d in enumerate(data)],
                        burnin=1, scans=10, trace=False, stats=False)

    # real test
    configure("PARAM_GIBBS_PARTIAL_DISCRETE", True)
    
    # now infer the posterior distribution of samecluster()
    guess_pairs = query([samecluster(i,j) for i in range(N) for j in range(i)],
                        [y(i)==d for i,d in enumerate(data)],
                        burnin=10, scans=100, trace=disptrace,
                        stats=dispstats)
    configure("PARAM_GIBBS_PARTIAL_DISCRETE", False)
    
    # Largest absolute error over all N*(N-1)/2 pairs.
    worst = max([abs(true_pairs[i] - guess_pairs[i].avg()) \
                 for i in range(N*(N-1)/2)])
    
    if dispstats:
      print "data:", data
      print "(datum1, datum2, true, guess)",
      k = 0
      for i in range(N):
        for j in range(i):
          t,g = true_pairs[k], guess_pairs[k].avg()
          k += 1
          # Only display pairs whose estimate is noticeably off.
          if abs(t - g) > .1:
            print "(",data[i], data[j], t, g, ")",
      print
      print "worst:", worst

    self.assertAbsEqual(worst, 0.0, .5, "worst error in cluster assignment")
    
if __name__ == "__main__":
  # Run all test cases in this module; testmodule expects the module
  # object, so the file imports itself by name.
  testmodule(__import__("testswitch"))

