// DDP.cpp: implementation of the DDP class.
//
//////////////////////////////////////////////////////////////////////

#include "DDP.h"

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>		// memset / memcpy (used by Iterate_cum_Gibbs_Met)

#include <algorithm>
#include <iterator>

#include "program.h"
#include "util.h"

//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////

#define DEBUG  1

////////////////////////////////////////////////////////////
// Debug helper: verify the invariant p[j] >= l[j][t] for every
// class j and every site t of the current block.  On violation it
// prints the offending entries together with the caller-supplied
// line number, then lets the assert fire.
void DDP::assertbug(int lineno){
  const int blockLen = m_nBlockEnd - m_nBlockStart;
  vector<vector<int> >	&l = m_NumClassL;
  vector<int>		&p = m_NumClassP;
  const int nClasses = (int)m_NumClassP.size();

  for (int j = 0; j < nClasses; j++)
  {
    for (int t = 0; t < blockLen; t++)
    {
      if ( p[j] < l[j][t] )
	cout << " line " << lineno << "  p[jjj] " << p[j]
	     << " l[jjj][tt] " << l[j][t] << " jjj " << j << " tt " << t
	     << "m_nBlockEnd " << m_nBlockEnd
	     << " m_nBlockStart " << m_nBlockStart << endl;
      assert( p[j] >= l[j][t] );
    }
  }
}

////////////////////////////////////////////////////////////
// Construct a DDP sampler with its default configuration:
// lazily-allocated state, sampler schedule, output locations,
// and the fixed hyper parameters of the hierarchical prior.
DDP::DDP()
{
  // lazily-allocated / external state
  m_pData = 0;
  m_EqClassD = 0;
  m_NumClassM = 0;
  m_pFPTheta = 0;

  // sampler configuration
  m_gamma = 0.4;
  m_doConparam = 1;		// resample concentration hyper parameters
  m_bCheckConvg = 1;
  m_bAmbiguous = 0;

  // iteration schedule
  m_nthIter = 0;
  m_nThining = 5;
  m_nBurninIteration = 5000;
  m_nCumIteration = 5000;

  // output locations
  m_strOutdir = "./output/";
  m_strFilename = "test";

  //// hyper parameters of the prior ////
  // mutation (beta prior); alternatives tried: beta_h = 1.2f
  alpha_h	= 80.0f;
  beta_h	= 0.8f;
  // noisy observation; alternative tried: beta_g = 5.0f
  alpha_g = 5000.f;
  beta_g	= 10.0f;
  // genotype mismatch weights (see Sample_H: single / double miss)
  mu1		= 0.23f;
  mu2		= 0.02f;
  // scale parameter (child DP)
  alpha_a = 0.5f;
  alpha_b	= 0.1f;
  // scale parameter (master DP)
  gamma_a = 0.5f;
  gamma_b	= 0.1f;
  // mutation-rate prior
  alpha_mut = 0.0001f;
  beta_mut = 0.1f;
}

// Destructor.  Intentionally empty, matching the original.
// NOTE(review): m_EqClassD / m_NumClassM obtained via
// Alloc2DMemory are never released here — confirm ownership is
// handled elsewhere (m_pData may be externally owned via Init).
DDP::~DDP()
{
}

////////////////////////////////////////////////////////////
// Load genotype/haplotype data from the given files into the
// internal database (allocated on first use) and reset the block
// window to cover the whole data set.
//   filelist : input file names, forwarded to GenoHaploDB::LoadData
//   returns 1 on completion.
int DDP::LoadData( vector<string> &filelist )
{
  // allocate the database lazily
  if ( m_pData == 0)
    m_pData = new GenoHaploDB();

  m_pData->LoadData( filelist );

  // the working block spans every SNP loaded
  m_nBlockStart = 0;
  m_nBlockEnd = m_pData->m_numT;
  m_nBlockLength = m_nBlockEnd;

  return 1;
}


////////// initialize haplotypes and counts ///////////
// Bind the sampler to an existing database and precompute all
// per-block constants and allocate the sufficient-statistic
// arrays for the SNP window [nstart, nend).  Does NOT perform the
// initial class assignments (see Initialize for that).
//   pDB    : shared genotype/haplotype database (not owned)
//   nstart : first SNP of the block (inclusive)
//   nend   : one past the last SNP (exclusive); must exceed nstart
//   returns 1 on completion.
int DDP::Init( GenoHaploDB *pDB, int nstart, int nend )
{
  assert( nstart < nend );

  srand( Seed );

  // Geno-Haplo data
  m_pData = pDB;

  m_nBlockStart = nstart;
  m_nBlockEnd = nend;
  m_nBlockLength = nend - nstart;
  m_nthIter = 0;

  int		numBlockT = m_nBlockLength;

  // constants reused throughout the sampler
  ab_h = beta_h + alpha_h;
  a1 = ( alpha_h ) / (ab_h );
  b1 = ( beta_h ) / (ab_h );
  ab_g = beta_g + alpha_g;
  a2	= (alpha_g) / (ab_g);
  b2	= (beta_g) / (ab_g);
  logB1 = log(B-1);
  logB = log(B);
  log_mu1=log(mu1);
  log_mu2=log(mu2);
  tiny = pow(10, -100);		// guard against log(0)

  // reset all per-class sufficient statistics
  clearSS();
  m_B.clear();

  ////////////////////////////////////////////////
  // create DP: in case of known ethnic group variable
  //for ( jj = 0; jj < nGroups; jj++ )
  //{
  //DP m_dp=new DP();
  //m_dp[jj].m_pDataIndex = m_pData->m_DataInGroup[jj];
  //}
  //m_pDataIndex = m_pData->m_DataInGroup;

  int I = m_pData->m_numTotalI;

  // per-individual class assignments d(i,e), allocated once
  if ( m_EqClassD == 0)
  {
    m_pData->Alloc2DMemory( &m_EqClassD, I, 2 );
  }
  int K=m_A.size();

  m_pData->Alloc2DMemory( &m_NumClassM, m_A.size(), numBlockT );
  InitializeQ(K);

  return 1;
}


////////// initialize haplotypes and counts ///////////
// Bind the sampler to pDB for the SNP window [nstart, nend), build a
// random initial phasing of every genotype, then run one sequential
// bootstrap sweep that assigns each haplotype to a class via the
// prior (Sample_D_Init) while accumulating every sufficient
// statistic (p, l, la, q, u).  m_gamma is temporarily forced to 1
// during the sweep and restored at the end.
int DDP::Initialize( GenoHaploDB *pDB, int nstart, int nend )
{
  // NOTE(review): the outer cc declared here is shadowed by the
  // local cc sampled inside the loop below — confirm intended.
  int		ii, tt, ee, it, cc;
  int		nGroups = pDB->m_nGroups;	// unused here
  bool	new_class =1;

  assert( nstart < nend );

  // Geno-Haplo data (shared, not owned)
  m_pData = pDB;

  m_nBlockStart = nstart;
  m_nBlockEnd = nend;
  m_nBlockLength = nend - nstart;

  int		numBlockT = m_nBlockLength;

  // shortcuts into the database
  unsigned char ***h = m_pData->m_Haplotypes;
  unsigned char ***g = m_pData->m_Genotypes;
  int	**g_match = m_pData->m_g_match;	// per-site genotype agreement flags
  int	**g_miss1 = m_pData->m_g_miss1;	// exactly one allele mismatched
  int	**g_miss2 = m_pData->m_g_miss2;	// both alleles mismatched
  int	***h_count = m_pData->m_h_count;
  vector<int> &q = m_NumClassQ;		// top-level class sizes
  int jj =0;	// class index of the previously processed haplotype; starts at d[0][0]=0
  //vector<int> &sum_mj = m_nSumClassN;

  // start from a clean slate with a single class
  clearSS();
  m_B.clear();

  // initialize with 1 class
  AddClass( 0, numBlockT, 0 );

  // force gamma = 1 for the bootstrap sweep, restore afterwards
  double tmp_gamma = m_gamma;
  m_gamma = 1;


  //m_pDataIndex = m_pData->m_DataInGroup;
  int I = m_pData->m_numTotalI;
  int K = m_A.size();
  InitializeQ(K);

  if ( m_EqClassD == 0 )
    m_pData->Alloc2DMemory( &m_EqClassD, I, 2);
  m_pData->Alloc2DMemory( &m_NumClassM, m_A.size(), numBlockT );
  // NOTE(review): InitializeQ(K) is called a second time here —
  // confirm the double call is intentional (it also runs in Init).
  InitializeQ(K);

  // initial random phasing of every individual
  for ( ii = 0; ii < I ; ii++ )
  {
    // init h: assign each genotype pair to the two haplotypes in
    // a random order; 255 marks missing data and is zeroed out
    for ( tt = nstart; tt < nend; tt++)
    {
      float aa = rand()/(float)RAND_MAX;
      int aa1 = 0;
      if ( aa > 0.5 )		aa1 = 1;

      h[0][ii][tt] = g[aa1][ii][tt];
      h[1][ii][tt] = g[1-aa1][ii][tt];
      if ( h[0][ii][tt] == 255 )
      {
	h[0][ii][tt] = 0;
	h[1][ii][tt] = 0;
      }
    }

    // init g_match, g_miss1, g_miss2: classify each site by how the
    // (unordered) haplotype pair matches the (unordered) genotype
    for (tt = nstart; tt < nend; tt++)
    {
      int g_id0 = MIN( g[0][ii][tt], g[1][ii][tt] );
      int g_id1 = MAX( g[0][ii][tt], g[1][ii][tt] );
      int h_id0 = MIN( h[0][ii][tt], h[1][ii][tt] );
      int h_id1 = MAX( h[0][ii][tt], h[1][ii][tt] );

      if ( g_id0 == h_id0 && g_id1 == h_id1 )
      {
	g_match[ii][tt] = 1;	// exact match
	g_miss1[ii][tt] = g_miss2[ii][tt] = 0;
      }
      else if ( g_id0 != h_id0 && g_id1 != h_id1 )
      {
	g_miss2[ii][tt] = 1;	// both alleles differ
	g_miss1[ii][tt] = g_match[ii][tt] = 0;
      }
      else
      {
	g_miss1[ii][tt] = 1;	// exactly one allele differs
	g_miss2[ii][tt] = g_match[ii][tt] = 0;
      }
    }
  }

  // calculate the global observation counters u[0:2]
  CalNumClassU();


  // random permutation of individuals for the sequential sweep
  vector<int> iivec( I );
  for ( ii = 0; ii < I; ii++ )	iivec[ii] = ii;
  random_shuffle( iivec.begin(), iivec.end() );


  // shortcuts for the remaining state
  vector<int> &c = m_EqClassC;		// parent (top-level) class of each bottom class
  int ** d = m_EqClassD;		// class assignment per haplotype (i,e)
  vector<int> 	&p	= m_NumClassP;	// upper level: class sizes
  int **m	= m_NumClassM;		// upper level: per-site counts
  vector<vector<int> >	&l	= m_NumClassL;	// lower level: ancestor-match counts
  vector<vector<int> >	*la = m_NumClassLA;	// per-allele site counts
  int			*u	= m_NumClassU;
  int J = m_NumClassP.size();

  for ( it = 0; it < I ; it++ )
  {
    for ( ee = 0; ee < 2; ee++ )
    {
      ii = iivec[it];



      // NOTE(review): hard-coded concentration; a TODO elsewhere
      // says the DP constructor uses 1 — confirm which is intended.
      double alpha0 = 0.7;

      //////////////////////////////////
      ///(2) 1. Sample c(i,e) DP
      // NOTE(review): jj here is still the class of the PREVIOUS
      // haplotype (0 for the very first), so c/q are updated for
      // that class before the new jj is sampled below — confirm.
      //vector<unsigned char> 	&bj = m_B[jj];
      int cc = Sample_C(m_B[jj], m, alpha0 ); 

      c[jj] = cc;
      q[cc]++; // the update is not delayed because cc cannot change. 
      /// 1. Sample d(i,e): class assignment from prior x likelihood
      jj = Sample_D_Init( h[ee][ii], l, alpha0);
      d[ii][ee] = jj;

      if ( jj < J )
      {
	// existing class: bump its size
	p[ jj ] ++;
	//sum_mj[ cc ] ++;
	new_class = 0;

      }
      else
      {
	new_class = 1;
	// add a brand-new class under top-level class cc
	AddClass( 1, numBlockT, cc );	// top-level
	J++;
      }
     // debug trace
     cout << "new_class " << new_class << " jj " << jj << endl;  

      // temporarily add this haplotype's allele counts to LA so
      // Sample_B sees them; they are subtracted again below
      for ( tt = nstart; tt < nend; tt++ )
      {
	la[ h[ee][ii][tt] ][jj][ tt-nstart ] ++;
      }

      // sample the ancestor haplotype for class jj
      m_B[jj] = Sample_B(new_class, m_B[jj], m_A[cc], h[ee][ii],
	  q[cc], p[jj], l[jj], la[0][jj], la[1][jj],
	  m[cc],  h_count[ee][ii],
	  m_b_count[jj], u, I);

      J = m_B.size();

      // Check the logic of m' the ss for theta probably some mistake in -1 or +1 when calculating P_C_AB


      // undo the temporary LA increment (Sample_H re-adds it)
      for ( tt = nstart; tt < nend; tt++)
      {
	la[ h[ee][ii][tt] ][jj][ tt-nstart ] --;
      }

      ////////

      // debug trace of the class statistics
      for ( tt = nstart; tt < nend; tt++ )
      {
	      cout <<"  p[jj]:"<<  p[jj]<<  "  l[jj][tt]:"<<  l[jj][tt]<< " size L"<< m_NumClassL.size()<<endl;
      }
      // sample the haplotype "h" given its ancestor and counters
      Sample_H( h[ee][ii], h[1-ee][ii],
	  g[0][ii], g[1][ii], m_B[jj],
	  g_match[ii], g_miss1[ii], g_miss2[ii],
	  p[jj], l[jj], la[0][jj], la[1][jj],
	  h_count[ee][ii],
	  u, I);
    }	// end of e
  }	// end of i

  // restore the caller-visible gamma
  m_gamma = tmp_gamma;

  return 1;
}

////////////////////////////////////////////////////////////
/// Burn-in sweeps: Gibbs sampling of the class assignments d and
/// haplotypes h, combined with a Metropolis-Hastings accept/reject
/// step (TestAcceptance / RollBack) for the proposed ancestor
/// update.  Verbose debug tracing is left in place.
///   numIter : number of full sweeps over all individuals
///   bDone   : optional mask; individuals with bDone[ii] set are
///             skipped (may be NULL)
///   returns 1.
int DDP::Iterate_det_Gibbs_Met( int numIter, bool* bDone )
{
  int		ii, tt, ee, it;
  bool	new_class;
  int		nstart = m_nBlockStart;
  int		nend = m_nBlockEnd;
  int		numBlockT = m_nBlockLength;

  // DB shortcuts
  unsigned char ***h = m_pData->m_Haplotypes;
  unsigned char ***g = m_pData->m_Genotypes;
  int	**g_match = m_pData->m_g_match;
  int	**g_miss1 = m_pData->m_g_miss1;
  int	**g_miss2 = m_pData->m_g_miss2;
  int	***h_count = m_pData->m_h_count;
  vector<int> &q = m_NumClassQ;		// top-level class sizes
  vector<int> &p = m_NumClassP;		// bottom-level class sizes

  int I = m_pData->m_numTotalI;


  m_nthIter = 0;
  printf( "##############################\n");
  printf( "Iterations for SNPs %d - %d\n", nstart, nend );
  printf( "##############################\n");
  printf("#Iter	Alpha	Gamma	K\n" );
  int K = m_NumClassQ.size();
  int **m	= m_NumClassM;			// upper level: per-site counts
  vector<vector<int> >	&l	= m_NumClassL;	// lower level: ancestor-match counts
  vector<vector<int> >	*la = m_NumClassLA;	// per-allele site counts
  int	*u	= m_NumClassU;

  new_class =0;

  // iterate
  for ( int iter=0; iter < numIter; iter++ )
  {
    int J = m_NumClassP.size();

    // reset the per-sweep removal flags
    m_Remove_Class.clear();
    for ( int jj=0; jj < J; jj++)
      m_Remove_Class.push_back(0);

    vector<bool>	&remove_class = m_Remove_Class;

    // random permutation of ii (individuals)
    vector<int> iivec( I );
    for ( ii = 0; ii < I; ii++ )	iivec[ii] = ii;
    random_shuffle( iivec.begin(), iivec.end() );

    vector<int> &c = m_EqClassC;
    int ** d = m_EqClassD;
    int jj=0;
    for ( it = 0; it < I ; it++ )
    {
      for ( ee = 0; ee < 2; ee++ )
      {
	ii = iivec[it];

	if ( !(bDone && bDone[ii]) )
	{
	  double alpha0 = 0.7; //TODO is 1 in DP constructor
	  cout << "434:" << m_NumClassL.size() << J << endl;
	  assert( (int)m_NumClassL.size() == J );

	  // --- top level: resample c for (ii,ee)'s current class ---
	  jj=d[ii][ee];
	  // remove jj's contribution from the top-level statistics
	  DeleteTopSS( jj, c[jj]);

	  ///(2) 1. Sample c(i,e) DP
	  int cc = Sample_C(m_B[jj], m, alpha0 );

	  c[jj] = cc;
	  q[cc]++; // the update is not delayed because cc cannot change.

	  // --- bottom level ---
	  int	old_d = d[ii][ee];
	  // BackupOldSS backs up p, la, l for a possible RollBack
	  BackupOldSS( old_d, remove_class );
	  // remove (ii,ee)'s contribution so d can be resampled
	  DeleteBottomSS( ii, ee, d[ii][ee]);

	  old_lj = l[ old_d ];

	  // flag old_d for removal if it just became empty
	  if ( p[ old_d ] == 0 )
	  {
	    remove_class[ old_d ] = 1;
	  }
	  else
	    remove_class[ old_d ] = 0;

	  /// 1. Sample d(i,e)  (shadows the outer jj intentionally)
	  int jj = Sample_D( h[ee][ii], l, alpha0);

	  d[ii][ee] = jj;
	  cout << " 476 J " << J << "m_NumClassL.size()" << m_NumClassL.size() << " jj " << jj << endl;  

	  assert( (int)m_NumClassL.size() == J );
	  if ( d[ii][ee] < J )
	  {
	    // existing class
	    BackupUpdateSS( jj, remove_class, 0 );

	    new_class = 0;
	    remove_class[ jj ] = 0;

	    p[ jj ] ++;

	    for ( tt = nstart; tt < nend; tt++)
	    {
	      la[ h[ee][ii][tt] ][jj][tt-nstart]++;
	    }
	  }
	  else
	  {
	    // brand-new class: first try to recycle an emptied slot
	    new_class = 1;
	    int iterj = 0;
	    // FIX: the bounds check must come first — the original
	    // order read remove_class[J] one past the end when no
	    // emptied slot existed.
	    while ( iterj < J && remove_class[iterj] == 0 )
	    {
	      iterj++;
	    }
	    if ( iterj < J )
	    {
	      // recycle slot iterj for the new class
	      jj = iterj;

	      BackupUpdateSS( jj, remove_class, 0 );

	      d[ii][ee] = jj;
	      remove_class[ jj  ] = 0;
	      p[ jj ] = 1;

	      for ( tt=0; tt < numBlockT; tt++)
	      {
		int hh = h[ee][ii][tt];
		l[ jj ][tt] = 0;
		la[ hh ][ jj ][tt] = 1;
		la[1-hh][ jj ][tt] = 0;
	      }
	    }
	    else
	    {
	      // genuinely append a new class under top-level cc
	      BackupUpdateSS( jj,  remove_class, 1 );

	      // add class
	      AddClass( 1, numBlockT, cc );	// top-level

	      remove_class.push_back(0);

	      for ( tt = nstart; tt < nend; tt++ )
	      {
		la[ h[ee][ii][tt] ][jj][ tt-nstart ] ++;
	      }

	      J++;
	      cout << " 537 J " << J << m_NumClassL.size() << m_NumClassP.size() << endl;
	    }
	  }
	  assert( (int)m_NumClassL.size() == J );

	  cout << "new_class " << new_class << " jj " << jj << endl;  
	  vector<unsigned char> old_b = m_B[old_d];

	  // sample ancestor B
	  // (3) new_class is used from the last iteration  
	  vector<unsigned char> temp_b = Sample_B(new_class,
	      m_B[jj], m_A[cc],h[ee][ii],
	      q[cc], p[jj], l[jj], la[0][jj], la[1][jj],
	      m[cc],  h_count[ee][ii],
	      m_b_count[jj], u, I);

	  // Metropolis accept/reject of the (d, B) proposal
	  cout << "561 m_NumClassL:" << m_NumClassL.size() << " J " << J << endl; 
	  assertbug(548);
	  bool test = TestAcceptance( old_d, jj, h[ee][ii], old_b, temp_b, p, l );
	  cout << "563 test:" << test << "m_bClassAdded:" << m_bClassAdded <<  endl;

	  if ( test )
	  {
	    m_B[jj] = temp_b;
	  }
	  else
	  {
	    // rejected: restore assignment and backed-up statistics
	    d[ii][ee] = old_d;
	    RollBack( ii, ee, old_d, jj );
	  }
	  J = m_NumClassP.size();

	  //
	  jj = d[ii][ee];

	  // remove this haplotype's allele counts before resampling h
	  // (Sample_H re-adds them for the newly drawn alleles)
	  for (int ba = 0; ba < B; ba++)
	  {
	    for ( tt = nstart; tt < nend; tt++)
	    {
	      if (  ba == h[ee][ii][tt] )
		la[ba][jj][tt - nstart]--;
	    }
	  }

	  // sample the haplotype "h"
	  Sample_H( h[ee][ii], h[1-ee][ii],
	      g[0][ii], g[1][ii], m_B[jj],
	      g_match[ii], g_miss1[ii], g_miss2[ii],
	      p[jj], l[jj], la[0][jj], la[1][jj],
	      h_count[ee][ii],
	      u, I);

	}
      }	// end of e
    }	// end of i

    // drop emptied classes, then merge duplicated ancestors
    CondenseList( 1 );

    K = m_NumClassQ.size();

    //	if ( iter % 5 == 0 )
    {
      MergeList();
      CondenseList();
    }

    if ( m_pFPTheta)
      EstimateTheta( iter );

    if ( m_doConparam && iter >= 100 )
      Sample_Conparam( 1, 10 );

    if ( iter % 100 == 0 )
    {
      printf("%d	%.4f %.4f	%d\n", m_nthIter, m_alpha, m_gamma,  K );
    }

    m_nthIter++;

  }	// end of iter

  return 1;
}


////////////////////////////////////////////////////////////
/// Accumulation sweeps after burn-in: same Gibbs + Metropolis
/// scheme as Iterate_det_Gibbs_Met, but additionally snapshots the
/// haplotypes (h_old / predh_old) and accumulates the cumulative
/// haplotype estimate via UpdateCumHaplotype.
///   numIter : number of full sweeps over all individuals
///   bDone   : optional mask; individuals with bDone[ii] set are
///             skipped (may be NULL)
///   returns 1.
int DDP::Iterate_cum_Gibbs_Met( int numIter, bool* bDone )
{
  int		ii, tt, ee, it;
  bool	new_class;

  int		nstart = m_nBlockStart;
  int		nend = m_nBlockEnd;
  int		numBlockT = m_nBlockLength;

  // DB shortcuts
  unsigned char ***h = m_pData->m_Haplotypes;
  unsigned char ***g = m_pData->m_Genotypes;
  int	**g_match = m_pData->m_g_match;
  int	**g_miss1 = m_pData->m_g_miss1;
  int	**g_miss2 = m_pData->m_g_miss2;
  int	***h_count = m_pData->m_h_count;
  vector<int> &q = m_NumClassQ;		// top-level class sizes
  vector<int> &p = m_NumClassP;		// bottom-level class sizes

  int I = m_pData->m_numTotalI;

  // initialize cum_h
  int *cross = new int[I];
  memset( cross, 0, sizeof(int)*I );
  m_pData->UpdateCumHaplotype( h, 0, nstart, nend, &cross );

  // backup h_old
  unsigned char **h_old[2];
  m_pData->Alloc2DMemory( &(h_old[0]), I, numBlockT );
  m_pData->Alloc2DMemory( &(h_old[1]), I, numBlockT );

  for (ii=0; ii<I; ii++)
  {
    memcpy( h_old[0][ii], &(h[0][ii][nstart]),
	sizeof(unsigned char)*numBlockT );
    memcpy( h_old[1][ii], &(h[1][ii][nstart]),
	sizeof(unsigned char)*numBlockT );
  }

  // convergence-check snapshot (only allocated when enabled)
  unsigned char **predh_old[2];
  if ( m_bCheckConvg )
  {
    m_pData->Alloc2DMemory( &(predh_old[0]), I, numBlockT );
    m_pData->Alloc2DMemory( &(predh_old[1]), I, numBlockT );

    for (ii=0; ii<I; ii++)
    {
      memcpy( predh_old[0][ii], &(h[0][ii][nstart]),
	  sizeof(unsigned char)*numBlockT );
      memcpy( predh_old[1][ii], &(h[1][ii][nstart]),
	  sizeof(unsigned char)*numBlockT );
    }

  }

  int K = m_NumClassQ.size();
  new_class =0;
  // iterate
  for ( int iter=0; iter < numIter; iter++ )
  {
    int J = m_NumClassP.size();

    // reset the per-sweep removal flags
    m_Remove_Class.clear();
    for ( int jjt=0; jjt < J; jjt++)
      m_Remove_Class.push_back(0);

    vector<bool>	&remove_class = m_Remove_Class;

    // random permutation of ii (individuals)
    vector<int> iivec( I );
    for ( ii = 0; ii < I; ii++ )	iivec[ii] = ii;
    random_shuffle( iivec.begin(), iivec.end() );

    vector<int> &c = m_EqClassC;
    int ** d = m_EqClassD;
    int jj;
    for ( it = 0; it < I ; it++ )
    {
      for ( ee = 0; ee < 2; ee++ )
      {
	ii = iivec[it];

	if ( !(bDone && bDone[ii]) )
	{

	  int **m	= m_NumClassM;			// upper level: per-site counts
	  vector<vector<int> >	&l	= m_NumClassL;	// lower level: ancestor-match counts
	  vector<vector<int> >	*la = m_NumClassLA;	// per-allele site counts
	  int	*u	= m_NumClassU;

	  double alpha0 = 0.7; //TODO is 1 in DP constructor

	  // --- top level: resample c for (ii,ee)'s current class ---
	  jj=d[ii][ee];
	  // remove jj's contribution from the top-level statistics
	  // (1)
	  DeleteTopSS( jj, c[jj]);

	  ///(2) 1. Sample c(i,e) DP
	  int cc = Sample_C(m_B[jj], m, alpha0 );

	  c[jj] = cc;
	  q[cc]++; // the update is not delayed because cc cannot change.

	  // --- bottom level ---
	  int	old_d = d[ii][ee];
	  // BackupOldSS backs up p, la, l for a possible RollBack
	  BackupOldSS( old_d, remove_class );
	  // remove (ii,ee)'s contribution so d can be resampled
	  DeleteBottomSS( ii, ee, d[ii][ee]);

	  old_lj = l[ old_d ];

	  // flag old_d for removal if it just became empty
	  if ( p[ old_d ] == 0 )
	  {
	    remove_class[ old_d ] = 1;
	  }
	  else
	    remove_class[ old_d ] = 0;

	  /// 1. Sample d(i,e)  (shadows the outer jj intentionally)
	  int jj = Sample_D( h[ee][ii], l, alpha0);

	  d[ii][ee] = jj;

	  if ( d[ii][ee] < J )
	  {
	    // existing class
	    BackupUpdateSS( jj, remove_class, 0 );

	    new_class = 0;
	    remove_class[ jj ] = 0;

	    p[ jj ] ++;

	    for ( tt = nstart; tt < nend; tt++)
	    {
	      la[ h[ee][ii][tt] ][jj][tt-nstart]++;
	    }
	  }
	  else
	  {
	    // brand-new class: first try to recycle an emptied slot
	    new_class = 1;
	    int iterj = 0;
	    // FIX: the bounds check must come first — the original
	    // order read remove_class[J] one past the end when no
	    // emptied slot existed.
	    while ( iterj < J && remove_class[iterj] == 0 )
	    {
	      iterj++;
	    }
	    if ( iterj < J )
	    {
	      // recycle slot iterj for the new class
	      jj = iterj;

	      BackupUpdateSS( jj, remove_class, 0 );

	      d[ii][ee] = jj;
	      remove_class[ jj  ] = 0;
	      p[ jj ] = 1;

	      for ( tt=0; tt < numBlockT; tt++)
	      {
		int hh = h[ee][ii][tt];
		l[ jj ][tt] = 0;
		la[ hh ][ jj ][tt] = 1;
		la[1-hh][ jj ][tt] = 0;
	      }
	    }
	    else
	    {
	      // genuinely append a new class under top-level cc
	      BackupUpdateSS( jj,  remove_class, 1 );

	      // add class
	      AddClass( 1, numBlockT, cc );	// top-level

	      remove_class.push_back(0);

	      for ( tt = nstart; tt < nend; tt++ )
	      {
		la[ h[ee][ii][tt] ][jj][ tt-nstart ] ++;
	      }

	      J++;
	    }
	  }

	  vector<unsigned char> old_b = m_B[old_d];

	  // sample ancestor B
	  // (3) new_class is used from the last iteration  
	  vector<unsigned char> temp_b = Sample_B(new_class,
	      m_B[jj], m_A[cc],h[ee][ii],
	      q[cc], p[jj], l[jj], la[0][jj], la[1][jj],
	      m[cc],  h_count[ee][ii],
	      m_b_count[jj], u, I);

	  // Metropolis accept/reject of the (d, B) proposal
	  bool test = TestAcceptance( old_d, jj, h[ee][ii], old_b, temp_b, p, l );

	  if ( test )
	  {
	    m_B[jj] = temp_b;
	  }
	  else
	  {
	    // rejected: restore assignment and backed-up statistics
	    d[ii][ee] = old_d;
	    RollBack( ii, ee, old_d, jj );
	  }

	  //
	  jj = d[ii][ee];
	  // NOTE(review): the det variant does NOT repeat this
	  // DeleteTopSS after the Metropolis step — confirm which
	  // behavior is intended.
	  // (1)
	  DeleteTopSS( jj, c[jj]);

	  // remove this haplotype's allele counts before resampling h
	  for (int ba = 0; ba < B; ba++)
	  {
	    for ( tt = nstart; tt < nend; tt++)
	    {
	      if (  ba == h[ee][ii][tt] )
		la[ba][jj][tt - nstart]--;
	    }
	  }

	  // sample the haplotype "h"
	  Sample_H( h[ee][ii], h[1-ee][ii],
	      g[0][ii], g[1][ii], m_B[jj],
	      g_match[ii], g_miss1[ii], g_miss2[ii],
	      p[jj], l[jj], la[0][jj], la[1][jj],
	      h_count[ee][ii],
	      u, I);
	}
      }	// end of e
    }	// end of i

    // drop emptied classes, then merge duplicated ancestors
    CondenseList( 1 );

    K = m_NumClassQ.size();

    //	if ( iter % 5 == 0 )
    {
      MergeList();
      CondenseList();
    }

    if ( m_pFPTheta)
      EstimateTheta( iter );

    if ( m_doConparam && iter >= 100 )
      Sample_Conparam( 1, 10 );

    if ( iter % 100 == 0 )
    {
      printf("%d	%.4f %.4f	%d\n", m_nthIter, m_alpha, m_gamma,  K );
    }

    m_nthIter++;

  }

  // delete memory
  for (ii=0 ; ii < I; ii++)
  {
    if ( h_old[0][ii] ) delete [] h_old[0][ii];
    if ( h_old[1][ii] ) delete [] h_old[1][ii];
  }
  if ( h_old[0] ) delete [] h_old[0];
  if ( h_old[1] ) delete [] h_old[1];

  // FIX: predh_old was previously leaked; release it with the same
  // row-by-row pattern used for h_old (assumes Alloc2DMemory
  // allocates each row with new[], as the h_old cleanup implies)
  if ( m_bCheckConvg )
  {
    for (ii=0 ; ii < I; ii++)
    {
      if ( predh_old[0][ii] ) delete [] predh_old[0][ii];
      if ( predh_old[1][ii] ) delete [] predh_old[1][ii];
    }
    if ( predh_old[0] ) delete [] predh_old[0];
    if ( predh_old[1] ) delete [] predh_old[1];
  }

  if ( cross ) delete [] cross;

  return 1;
}

//////////////////////////////////////////////////////////
// If there are duplications in the ancestral pool, merge each
// group of identical ancestors B[j] onto its lowest-index
// representative: the counters p / l / la are accumulated onto the
// representative, duplicates are flagged in m_Remove_Class, and
// every assignment d(i,e) is remapped.  CondenseList() must run
// afterwards to physically drop the flagged classes.
// NOTE(review): q / m / b_count / EqClassC of duplicates are not
// merged here — confirm CondenseList / later resampling covers them.
///////////////////////////////////////////////////////////
int DDP::MergeList()
{
  int J = m_B.size();
  int jj, tt, ka;

  vector<int> MapJ(J,0);
  vector<bool> bMapped(J,0);

  for (jj=0; jj<J; jj++)
  {
    if ( bMapped[jj] == 0 )
    {
      // jj becomes the representative of its duplicate group
      m_Remove_Class[jj] = 0;
      MapJ[jj] = jj;
      bMapped[jj] = 1;
      for (ka=jj+1; ka<J; ka++)
      {
	if ( bMapped[ka] == 0 && m_B[ka] == m_B[jj] )
	{
	  // ka duplicates jj: fold its statistics into jj
	  m_Remove_Class[ka] = 1;
	  MapJ[ka] = jj;
	  bMapped[ka] = 1;

	  m_NumClassP[jj] += m_NumClassP[ka];
	  //m_nSumClassN[jj] += m_nSumClassN[ka];
	  for (tt=0; tt<m_nBlockLength; tt++)
	  {
	    m_NumClassLA[0][jj][tt] += m_NumClassLA[0][ka][tt];
	    m_NumClassLA[1][jj][tt] += m_NumClassLA[1][ka][tt];
	    m_NumClassL[jj][tt] += m_NumClassL[ka][tt];
	  }
	}
      }
    }
  }

  // remap every assignment to its group representative
  for (int ii=0; ii < m_pData->m_numTotalI; ii++)
  {
    for( int ee=0; ee<2; ee++)
    {
      int dd = m_EqClassD[ii][ee] ;
      assert( MapJ[dd] >= 0 );
      m_EqClassD[ii][ee] = MapJ[dd];
    }
  }
  return 1;
}


///////////////////////////////////////////
// Remove the empty class from the list
//	and update the counters and variables
// bBackupB = 1 additionally pushes each removed ancestor onto
// m_Buffer_B before erasing it.  Classes flagged in
// m_Remove_Class are erased from p / B / l / la, and every
// assignment d(i,e) is remapped to the compacted indices.
int DDP::CondenseList( int bBackupB )
{
  int ii, ee, jj;
  int J = m_NumClassP.size();

  // rem_j: original index of the FIRST removed class; only valid
  // once foundR is set (all classes below rem_j are kept, so their
  // indices are unchanged by the erases below)
  int ind = 0, rem_j;
  vector<int>	MapJ(J,0);
  bool foundR = 0;
  jj = 0;
  // pass 1: build the old->new index map; kept classes receive
  // compacted indices, removed ones get -1
  while ( jj < J )
  {
    if ( m_Remove_Class[jj] == 0 )
      //		if ( m_nSumClassN[jj] > 0 )
    {
      MapJ[jj] = ind;
      ind++;
    }
    else
    {
      MapJ[jj] = -1;
      if ( foundR == 0 )
      {
	foundR = 1;
	// since every class before this one was kept, ind equals
	// jj here, i.e. the original index of the first removal
	rem_j = ind;
      }
    }
    jj++;
  }

  // nothing flagged: the list is already compact
  if ( foundR == 0 )
    return 1;

  ///////////
  // pass 2: erase flagged classes from the highest index downward
  // so earlier erases never shift the indices still to be visited
  for (jj=J-1; jj>=rem_j; jj--)
  {
    if ( MapJ[jj] < 0 )
    {
      if (  bBackupB == 1 )
	m_Buffer_B.push_back( m_B[jj] );

      m_NumClassP.erase( m_NumClassP.begin()+jj );
      //m_nSumClassN.erase( m_nSumClassN.begin()+jj );
      m_B.erase( m_B.begin()+jj );

      // NOTE(review): m_Remove_Class (and m_EqClassC / m_b_count)
      // are NOT erased here — confirm they are rebuilt before the
      // next use (the iterate loops do reset m_Remove_Class).

      m_NumClassL.erase( m_NumClassL.begin()+jj );
      m_NumClassLA[0].erase( m_NumClassLA[0].begin()+jj );
      m_NumClassLA[1].erase( m_NumClassLA[1].begin()+jj );

    }
  }

  int I = m_pData->m_numTotalI;

  // pass 3: remap every assignment to the compacted indices
  for ( ii=0; ii < I; ii++)
  {
    for ( ee=0; ee<2; ee++)
    {
      int jj = m_EqClassD[ii][ee];
      assert( MapJ[jj] >= 0 );
      m_EqClassD[ii][ee] = MapJ[jj];
    }
  }

  return 1;
}

//////////////////////////////////////////////////////
//	Sample each site of one haplotype from its conditional
//	posterior given the complementary haplotype, the genotypes,
//	and the class's ancestor, updating every affected counter
//	(g_match/g_miss1/g_miss2, u, lj, laj0/laj1, h_count) in place.
//		h: haplotype to be sampled (written in place)
//		h1: complementary haplotype to h
//		g0, g1: genotypes
//		bk: ancestral haplotype for h to be inherited
//		g_match, g_miss1, g_miss2: ss for H->G observation
//		pj, lj, laj0, laj1, h_count, u: ss
//		I: the number of individuals
///////////////////////////////////////////////////////
int DDP::Sample_H( unsigned char *h, unsigned char *h1,
    unsigned char *g0, unsigned char *g1,
    vector<unsigned char>  &bk,
    int	*g_match, int *g_miss1, int *g_miss2,
    int pj, vector<int> &lj, vector<int> &laj0, vector<int> &laj1,
    int *h_count,
    int *u, int I)
{
  int tt, bb;
  int nstart = m_nBlockStart;
  int	nend = m_nBlockEnd;
  int minh, maxh, ming, maxg;

  double log_a2 = log(a2);
  double log_b2 = log(b2);
  vector<double> log_pH;
  int bstart;
  for ( tt = nstart; tt < nend; tt++ )
  {
    bstart = tt - nstart;
    // u_temp[s][b] = indicator that choosing allele b at this site
    // yields observation state s (0 match, 1 single, 2 double miss)
    int u_temp[3][B];
    memset( (int*)u_temp, 0, sizeof(int)*3*B );
    // remove site tt's current contribution from the global counters
    u[0] = u[0] - g_match[tt];
    u[1] = u[1] - g_miss1[tt];
    u[2] = u[2] - g_miss2[tt];
    for (bb = 0; bb < B; bb++)
    {
      minh = MIN( bb, h1[tt] );
      maxh = MAX( bb, h1[tt] );
      ming = MIN( g0[tt], g1[tt] );
      maxg = MAX( g0[tt], g1[tt] );
      if ( (minh == ming) && (maxh == maxg) )
	u_temp[0][bb] = 1;
      else if ( ( minh != ming ) && (maxh != maxg ) )
	u_temp[2][bb] = 1;
      else
	u_temp[1][bb] = 1;
    }
    // l_temp[b] = 1 iff allele b equals the ancestor at this site
    int l_temp[] = {0, 0};
    if (bk[bstart]) l_temp[1] = 1;
    else l_temp[0] = 1;

    log_pH.clear();
    double max_log_pH = -INF;
    for (bb = 0; bb < B; bb++)
    {
      // log posterior of allele bb: mutation (beta-binomial) term
      // plus the observation terms weighted by the global counters
      double lp = lgamma( alpha_h + lj[bstart] + l_temp[bb] )
	+ lgamma( beta_h + pj - lj[bstart] - l_temp[bb])
	- lgamma( pj + ab_h )
	- ( pj - lj[bstart] - l_temp[bb])*logB1 +
	( u[0] + u_temp[0][bb] )*log_a2 +
	( I*m_nBlockLength - u[0] - u_temp[0][bb])*log_b2
	+ ( u[1] + u_temp[1][bb] )*log_mu1
	+ ( u[2] + u_temp[2][bb] )*log_mu2;

      log_pH.push_back( lp );
      if ( lp > max_log_pH )
	max_log_pH = lp;
    }
    // FIX: shift by the MAXIMUM before exponentiating (the original
    // subtracted the minimum, leaving non-negative exponents that
    // could overflow to inf).  Shifting by any constant leaves the
    // normalized distribution unchanged.
    for (bb=0; bb<B; bb++)
      log_pH[bb] = exp( log_pH[bb] - max_log_pH );

    normalize( log_pH );
    // sample ht
    h[tt] = sample_discrete( log_pH );

    // h_count marks agreement with the ancestor
    if ( bk[bstart] == h[tt] )
      h_count[tt] = 1;
    else
      h_count[tt] = 0;

    // re-classify the site and re-add its contribution to u
    bb = h[tt];
    minh = MIN( bb, h1[tt] );
    maxh = MAX( bb, h1[tt] );
    ming = MIN( g0[tt], g1[tt] );
    maxg = MAX( g0[tt], g1[tt] );
    if ( (minh == ming) && (maxh == maxg) )
    {
      g_match[tt] = 1;
      g_miss1[tt] = 0;
      g_miss2[tt] = 0;

      u[0] += 1;
    }
    else if ( ( minh != ming ) && (maxh != maxg ) )
    {
      g_match[tt] = 0;
      g_miss1[tt] = 0;
      g_miss2[tt] = 1;

      u[2] += 1;
    }
    else
    {
      g_match[tt] = 0;
      g_miss1[tt] = 1;
      g_miss2[tt] = 0;

      u[1] += 1;
    }

    // update per-class counters for the newly drawn allele
    lj[bstart] += h_count[tt];
    if ( h[tt] == 0 )
      laj0[bstart] += 1;
    else
      laj1[bstart] += 1;

  }

  return 1;
}




////////////////////////////////////////////////////////////
// Recompute the global observation counters u[0..2] (genotype
// match / single mismatch / double mismatch) by summing the
// per-site indicator flags over every individual in the current
// block, and assert the three categories exactly cover
// I * blockSize sites.
int DDP::CalNumClassU()
{
  int		**g_match = m_pData->m_g_match;
  int		**g_miss1 = m_pData->m_g_miss1;
  int		**g_miss2 = m_pData->m_g_miss2;

  int		*u		= m_NumClassU;
  int		I	= m_pData->m_numTotalI;

  u[0] = u[1] = u[2] = 0;

  for ( int ii = 0; ii < I; ii++ )
  {
    for ( int tt = m_nBlockStart; tt < m_nBlockEnd; tt++ )
    {
      if ( g_match[ii][tt] == 1 )
	u[0] ++;
      else if ( g_miss1[ii][tt] == 1 )
	u[1] ++;
      else if ( g_miss2[ii][tt] == 1 )
	u[2] ++;
    }
  }
  // every site must fall into exactly one category
  int sumU = u[0] + u[1] + u[2];
  int blockSize = m_nBlockEnd - m_nBlockStart;
  assert(sumU == blockSize*I);
  return 1;
}

///////////////////////////////////////////////////////
//		Propose C(i,e) from prior(tau)
//		l = numClassL
//		FromTopLevel = whether the draw is from top or bottom urn
//		@returns: which color is drawn Cie
///////////////////////////////////////////////////////
int DDP::Sample_D( unsigned char *h, 
    vector<vector<int> > &l, double alpha0)
{
  int dd, jj;
  // p = number of haplotypes for a given ancestor
  vector<int> &p = m_NumClassP;
  int J = p.size();
  //double a_nsumg = alpha0 / ( sum( p ) + m_gamma ) ;

  // DP_vec stores discrete prob dist of phi's (each \phi denotes random variable associated with the color varying from 1..J+1, J+1 is new color, it is associated with lower urn) //sum(DP_Vec) = 1
  // alpha0 = tau
  vector<double>	DP_vec;

  //this is proababilty of choosing old color.
  for (jj=0; jj < J; jj++)
  {
    DP_vec.push_back( p[jj]);
  }
  //this is proababilty of choosing new color //eqn:2
  DP_vec.push_back( alpha0 );

  normalize( DP_vec );

  dd = sample_discrete( DP_vec );

  // determine whether from bottom level or from top level

  return dd;
}


//int DDP::Sample_D_Init(unsigned char *h,
int DDP::Sample_D_Init(unsigned char *h,
    vector<vector<int> > &l, double alpha0)
{
  int dd;
  int jj, tt;
  vector<int> &p = m_NumClassP;
  int J = p.size();


  // ph | a
  double *log_ph = new double[J+1];
  double *log_ph_temp = new double[J+1];

  for ( jj = 0 ; jj < J ; jj++ )
  {
    double	mc = log( p[jj] + ab_h );
    double	mc1 = mc + logB1;
    log_ph[jj] = 0;
    if ( p[jj] != 0 )
    {
      double log_pht;
      for (tt = m_nBlockStart; tt < m_nBlockEnd; tt++)
      {
	if ( m_B[jj][tt - m_nBlockStart] == h[tt] )
	  log_pht = log( alpha_h + l[jj][tt-m_nBlockStart] ) - mc;
	else
	  log_pht = log( beta_h + p[jj] - l[jj][tt-m_nBlockStart] ) - mc1;
	log_ph[jj] += log_pht;
      }
      log_ph_temp[jj] = log_ph[jj];
    }
    else
    {
      log_ph[jj] = -INF;
      log_ph_temp[jj] = INF;
    }
  }
  log_ph[J] = -m_nBlockLength * logB;
  log_ph_temp[J] = log_ph[J];

  double *log_DP_predict = new double[J+1];
  double *log_DP_predict_temp = new double[J+1];
  double min_DP_predict = INF;

  // DP_vec
  vector<double>	DP_vec;
  DP_vec.assign( J+1, 0 );
  int sumn = (int)sum<int>( p );

  for ( jj = 0; jj < J; jj++)
  {
    DP_vec[jj] = p[jj];
  }
  DP_vec[J] = alpha0;

  normalize( DP_vec );

  for ( jj = 0; jj < J+1; jj++ )
  {
    double logdp		= log( DP_vec[jj] + tiny );

    log_DP_predict[jj]	= log_ph[jj] + logdp;
    log_DP_predict_temp[jj] = log_ph_temp[jj] + logdp;

    if ( log_DP_predict_temp[jj] < min_DP_predict )
      min_DP_predict = log_DP_predict_temp[jj];
  }

  vector<double> DP_predict;
  for ( jj = 0; jj < J+1; jj++ )
  {
    double tmp = log_DP_predict[jj] - min_DP_predict;
    //printf("tmp %f\p", tmp);
    if ( tmp < -30 ) tmp = 0;
    else	tmp = exp( tmp );
    //printf("dp_predict %f\p", tmp);

    DP_predict.push_back( tmp  );
  }
  normalize( DP_predict );

  dd = sample_discrete( DP_predict );


  if ( log_ph ) delete [] log_ph;
  if ( log_ph_temp ) delete [] log_ph_temp;
  if ( log_DP_predict ) delete [] log_DP_predict;
  if ( log_DP_predict_temp ) delete [] log_DP_predict_temp;

  return dd;
}

int DDP::clearSS()
{
  // Reset every per-class sufficient-statistics container so that a fresh
  // block can be initialized from scratch.
  m_B.clear();                 // per-class haplotypes b_j
  m_b_count.clear();           // per-class, per-site counts for b
  m_EqClassC.clear();          // class -> top-level color assignment

  // bottom-level occupancy and match counts
  m_NumClassP.clear();
  //m_NumClassQ.clear();
  m_NumClassL.clear();

  // allele-specific counts, one table per allele value
  m_NumClassLA[0].clear();
  m_NumClassLA[1].clear();

  return 1;
}

int DDP::AddClass( int initvalue, int numT, int cc )
{
  // initialize with 1 class
  m_NumClassP.push_back( initvalue );
  //m_NumClassQ.push_back( initvalue );

  vector<unsigned char> zeros( numT, 0 );
  m_B.push_back( zeros );
  vector<int> zeros1( numT, 0 );
  m_NumClassL.push_back( zeros1 );
  m_NumClassLA[0].push_back( zeros1 );
  m_NumClassLA[1].push_back( zeros1 );
  m_EqClassC.push_back(cc);
  m_b_count.push_back(zeros1);
  cout<< " addclass " << m_NumClassP.size() << m_NumClassL.size() <<m_NumClassLA[0].size() << endl;

  return 1;
}


int DDP::DeleteTopSS( int jj, int cc)
{
  vector<int> &b_count = m_b_count[jj];


  m_NumClassQ[ cc ] -= 1;


  for (int tt = 0; tt < m_nBlockLength; tt++)
  {
    m_NumClassM[ cc ][tt] -= b_count[ tt];
  }

  return 1;
}

int DDP::DeleteBottomSS( int ii, int ee, int jj)
{
  // Remove haplotype (individual ii, allele ee) from class jj's
  // bottom-level sufficient statistics.
  unsigned char *hap = m_pData->m_Haplotypes[ee][ii];
  int *hcnt = m_pData->m_h_count[ee][ii];

  //m_nSumClassN[ jj ] -= 1;
  m_NumClassP[ jj ] -= 1;   // one fewer haplotype in class jj

  for (int t = 0; t < m_nBlockLength; t++)
  {
    int site = t + m_nBlockStart;
    m_NumClassLA[ hap[site] ][jj][t] -= 1;   // allele-specific count
    m_NumClassL[ jj ][t] -= hcnt[ site ];    // matched-allele count
  }

  return 1;
}
// Metropolis condition for test acceptance
// Metropolis acceptance test for a proposed move of haplotype h from class
// old_d (ancestor old_b) to class new_d (ancestor temp_a).
//   p : per-class occupancy counts; l : per-class per-site match counts.
// Returns 1 (accept) or 0 (reject).
bool DDP::TestAcceptance( int old_d, int new_d, unsigned char *h,
    vector<unsigned char> &old_b, vector<unsigned char> &temp_a,
    vector<int> &p, vector<vector<int> > &l )
{
  // Nothing changed: accept trivially.
  if ( (old_d == new_d) && (temp_a == old_b) )
    return 1;

  // Normalizers for the current class ...
  double	mc = log( p[old_d] + ab_h );
  double	mc1 = mc + logB1;

  double	log_ph_old = 0;
  double	log_ph_new = 0;
  // ... and for the proposed class (n = number of descendants per ancestor).
  double	mc_n = log( p[new_d] + ab_h );
  double	mc1_n = mc_n + logB1;

  // posterior likelihood of h(:,i,e) under the old and the proposed class
  for (int tt = 0; tt < m_nBlockLength; tt++)
  {
    assert( p[old_d] >= l[old_d][tt] );

    if ( old_b[tt] == h[tt+m_nBlockStart] )
      log_ph_old += log( alpha_h + l[old_d][tt] ) - mc;
    else
      log_ph_old += log( beta_h + p[ old_d ] - l[old_d][tt] ) - mc1;

    cout <<"  p[new_d]:"<<  p[new_d]<<  "  l[new_d][tt]:"<<  l[new_d][tt]<< " new_d:" << new_d 
	   << " tt " << tt << endl;
    assert( p[new_d] >= l[new_d][tt] );
    // BUG FIX: the proposal likelihood must use new_d's statistics
    // (previously it reused l[old_d], p[old_d], mc and mc1, even though
    // mc_n/mc1_n were computed and the assert above checks new_d's counts).
    if ( temp_a[tt] == h[tt+m_nBlockStart] )
      log_ph_new += log( alpha_h + l[new_d][tt] ) - mc_n;
    else
      log_ph_new += log( beta_h + p[ new_d ] - l[new_d][tt] ) - mc1_n;
  }

  // Metropolis criterion: accept with probability min(1, exp(delta log-lik)).
  double acceptance_p = MIN( 1, exp(log_ph_new - log_ph_old) );

  if ( (double)rand()/(double)(RAND_MAX) <= acceptance_p )
    return 1;
  else
    return 0;
}

int	DDP::BackupOldSS( int old_d, vector<bool> &remove_class  )
{
  // Snapshot class old_d's sufficient statistics into the old_* members so
  // that RollBack() can restore them if the move is rejected.
  old_pj            = m_NumClassP[ old_d ];
  old_remove_classj = remove_class[ old_d ];
  old_la0j          = m_NumClassLA[0][ old_d ];
  old_la1j          = m_NumClassLA[1][ old_d ];
  old_lj            = m_NumClassL[ old_d ];

  return 1;
}

// new_d      : class currently sampled
// classAdded : whether a brand-new class was created for this sample
int	DDP::BackupUpdateSS( int new_d, vector<bool> &remove_class, bool classAdded  )
{
  // Remember whether the move created a new class; RollBack() uses this to
  // decide between deleting the class and restoring the snapshot below.
  m_bClassAdded = classAdded;

  // A freshly added class has no pre-existing statistics worth saving.
  if ( m_bClassAdded )
    return 1;

  // Snapshot class new_d so the update can be undone.
  temp_pj            = m_NumClassP[ new_d ];
  temp_remove_classj = remove_class[ new_d ];
  temp_la0j          = m_NumClassLA[0][ new_d ];
  temp_la1j          = m_NumClassLA[1][ new_d ];
  temp_lj            = m_NumClassL[ new_d ];

  return 1;
}

int DDP::RollBack( int ii, int ee, int old_d, int new_d )
{
  // Undo a rejected move: restore old_d from the BackupOldSS() snapshot.
  m_NumClassP[ old_d ]     = old_pj;
  m_Remove_Class[ old_d ]  = old_remove_classj;
  m_NumClassLA[0][ old_d ] = old_la0j;
  m_NumClassLA[1][ old_d ] = old_la1j;
  m_NumClassL[ old_d ]     = old_lj;

  if ( m_bClassAdded )
  {
    // The rejected move had created a brand-new class: drop it again.
    DeleteClass();
  }
  else if ( old_d != new_d )
  {
    // Restore new_d from the BackupUpdateSS() snapshot.
    m_NumClassP[ new_d ]     = temp_pj;
    m_Remove_Class[ new_d ]  = temp_remove_classj;
    m_NumClassLA[0][ new_d ] = temp_la0j;
    m_NumClassLA[1][ new_d ] = temp_la1j;
    m_NumClassL[ new_d ]     = temp_lj;
  }

  return 1;
}

////////////////////////////////////////////////////////
//	gather posterior samples and save posterior mean
////////////////////////////////////////////////////////
int DDP::Sample_Pred()
{
  int I = m_pData->m_numTotalI;

  int nstart = m_nBlockStart;
  int nend = m_nBlockEnd;
  //int J = m_dp.size();

  int ii, tt;

  unsigned char **pred_h[2];

  pred_h[0] = m_pData->m_Pred_Haplotypes[0];
  pred_h[1] = m_pData->m_Pred_Haplotypes[1];

  unsigned char ***h = m_pData->m_Haplotypes;
  unsigned char ***h0 = m_pData->m_TrueHaplotypes;
  unsigned char ***g = m_pData->m_Genotypes;
  unsigned char **g_raw = m_pData->m_RawGenotypes;
  int		 **cum_h[2][2];

  for( int bb=0; bb < B; bb++)
  {
    cum_h[bb][0] = m_pData->m_Cum_Haplotypes[bb][0];
    cum_h[bb][1] = m_pData->m_Cum_Haplotypes[bb][1];
  }

  for ( ii = 0 ; ii < I; ii ++ )
  {
    for ( tt=nstart; tt<nend; tt++)
    {
      if ( cum_h[0][0][ii][tt] > cum_h[1][0][ii][tt] )
	pred_h[0][ii][tt] = 0;
      else
	pred_h[0][ii][tt] = 1;

      if ( cum_h[0][1][ii][tt] > cum_h[1][1][ii][tt] )
	pred_h[1][ii][tt] = 0;
      else
	pred_h[1][ii][tt] = 1;
    }
  }

  int ct3=0, cind = 0, mis1=0;
  int hetI = 0;
  for (ii=0 ; ii < I; ii++)
  {
    int ct1 = 0;
    int ct2 = 0;
    int hetero = 0;

    for (tt = nstart ; tt < nend ; tt++)
    {
      if ( g_raw[ii][tt] == 2 )
      {
	unsigned char h0 = pred_h[0][ii][tt],
		      h1 = pred_h[1][ii][tt];
	if ( pred_h[0][ii][tt] == g[0][ii][tt] )
	  h1 = g[1][ii][tt];

	else if ( pred_h[0][ii][tt] == g[1][ii][tt] )
	  h1 = g[0][ii][tt];

	if ( pred_h[1][ii][tt] == g[0][ii][tt] )
	  h0 = g[1][ii][tt];

	else if ( pred_h[1][ii][tt] == g[1][ii][tt] )
	  h0 = g[0][ii][tt];

	pred_h[0][ii][tt] = h0;
	pred_h[1][ii][tt] = h1;
      }
      if ( g_raw[ii][tt] == 0 || g_raw[ii][tt] == 1 )
      {
	pred_h[0][ii][tt] = g[0][ii][tt];
	pred_h[1][ii][tt] = g[1][ii][tt];
      }
    }
  }

  return 1;
}

// Parse a previously saved output file: skip to the "[H]" section and read
// each individual's phased haplotype pair back into pred_h (and into
// m_pData->m_Pred_Haplotypes), then rebuild the class table via GetPredFreq.
// Returns 1 on success, 0 if the file cannot be opened or has no [H] section.
int DDP::LoadOutput( const char *inputfile, int nstart, int nend,
    unsigned char*** pred_h )
{
  int tt, ee;

  FILE *fp = fopen( inputfile, "r" );
  if ( fp == 0 )		// BUG FIX: avoid crash on a missing file
    return 0;

  // 0. Load Header: scan forward until the "[H]" marker
  char tmp[500];
  int pos = -1;
  while ( !feof( fp ) )
  {
    if ( fgets( tmp, 500, fp ) == 0 )	// BUG FIX: check fgets result
      break;
    string oline( tmp );
    pos = oline.find( "[H]" );
    if ( pos >= 0 ) break;
  }

  if ( feof( fp ) )
  {
    fclose( fp );		// BUG FIX: don't leak the handle on early return
    return 0;
  }

  int i = -1;
  pos = 0;
  // BUG FIX: the condition used '~feof(fp)' (bitwise NOT) which is non-zero
  // even at end of file; logical negation is intended.
  while (  pos >= 0 && !feof( fp ) )
  {
    if ( fgets( tmp, 500, fp ) == 0 )
      break;
    string oline( tmp );
    pos = oline.find(  "---" );
    int post = oline.find(  "," );
    if ( pos < 0 )
      break;

    i++;
    // the record line starts with the individual index; verify it is sequential
    string stmp = oline.substr(0, post);
    int it;
    sscanf( stmp.c_str(), "%d", &it );
    if ( it != i )
      break;
    char h_ie[5000];

    // two haplotype lines (one per allele) follow each record line
    for ( ee = 0; ee < 2; ee++ )
    {
      if ( fgets( h_ie, 5000, fp ) == 0 )
	break;

      for ( tt = nstart; tt < nend; tt++ )
      {
	// any character other than '0' is treated as allele 1
	char ochar = h_ie[tt-nstart];
	if ( ochar == '0' )
	  pred_h[ee][i][tt] = 0;
	else
	  pred_h[ee][i][tt] = 1;
	////////////////////////////////
	m_pData->m_Pred_Haplotypes[ee][i][tt] = pred_h[ee][i][tt] ;
	////////////////////////////////
      }
    }
    if ( feof(fp) )
      break;
  }

  // rebuild the class table / frequencies from the loaded haplotypes
  GetPredFreq( m_pData->m_Pred_Haplotypes  );
  fclose(fp);

  return 1;
}

/*
   int DDP::Save( const char *inputfile, const char *outdir, int blocknum )
   {
   string infname(inputfile);
   int pos = infname.rfind("/");
   infname = infname.substr( pos+1, infname.length() );

   int dot = infname.find(".");
   infname = infname.substr( 0, dot );

   string filename(outdir);
   filename += infname;

   char fnamet[200];
   sprintf( fnamet, "_BL%d_B%d.txt",  m_nBlockLength, blocknum );

   filename +=  string( fnamet );

   int ii, jj, tt, kk, ee;
   int K = m_A.size();
   FILE *fp = fopen( filename.c_str(), "w" );

// 0. Save Header 
fprintf( fp, "********************************\n" );
fprintf( fp, "*	  DDP Haplotyper \n" );
fprintf( fp, "********************************\n\n" );

// 4. Save H
fprintf( fp, "[H]\n" );
int I = m_pData->m_numTotalI;
unsigned char **pred_h[2];
pred_h[0] = m_pData->m_Pred_Haplotypes[0];
pred_h[1] = m_pData->m_Pred_Haplotypes[1];
for (ii=0; ii < I; ii++)
{
fprintf( fp, "%d, G%d --- %d, %d	\n", ii, m_pData->m_EthnicGroup[ii], 
m_EqClass[ii][0], m_EqClass[ii][1] );
for (ee=0; ee<2; ee++)
{
for (tt = m_nBlockStart; tt< m_nBlockEnd; tt++)
fprintf( fp, "%d", pred_h[ee][ii][tt] );

fprintf( fp, "\n" );
}
}

fprintf( fp, "\n" );
// 1. Save A
fprintf( fp, "[A]\n" );
fprintf( fp, "ID	Frequency	%%	Haplotype\n" );
for (kk=0; kk < K; kk++)
{
fprintf( fp, "%d	%d	%.6f	", kk, m_nSumClassN[kk], m_nSumClassN[kk]/(float)(2*I) );
for (tt = 0; tt< m_nBlockLength; tt++)
{
fprintf( fp, "%d", m_A[kk][tt] );
}
fprintf( fp, "\n" );
}

fprintf( fp, "\n%2f\n\n", m_gamma );

// 3. Save DP
for (jj=0; jj<m_dp.size(); jj++)
{
fprintf( fp, "[DP-%d]\n", jj );
fprintf( fp, "%2f\n", m_dp[jj].m_alpha );
for (kk=0; kk < K; kk++)
{
fprintf( fp, "%3d ", m_dp[jj].m_NumClassN[kk] );
}
fprintf( fp, "\n" );
}

fclose(fp);
return 1;
}
  */
// Write the phased haplotypes ([H] section) and the ancestral haplotype
// frequencies ([A] section) to <outdir><basename>_hap_T<tstart>_<tend>.txt.
// Returns 1 on success, 0 if the output file cannot be opened.
int DDP::Save( const char *inputfile, const char *outdir, int tstart, int tend )
{
  // derive the output base name from the input file name
  string infname(inputfile);
  int pos = infname.rfind("/");
  infname = infname.substr( pos+1, infname.length() );

  int dot = infname.find(".");
  infname = infname.substr( 0, dot );

  string filename(outdir);
  filename += infname;

  char fnamet[200];
  // FIX: snprintf bounds the suffix buffer (sprintf could overflow)
  snprintf( fnamet, sizeof(fnamet), "_hap_T%d_%d.txt", tstart, tend );

  filename +=  string( fnamet );

  int ii, tt, kk, ee;
  int K = m_A.size();
  FILE *fp = fopen( filename.c_str(), "w" );
  if ( fp == 0 )	// BUG FIX: avoid crash when the file cannot be created
    return 0;

  // 0. Save Header
  fprintf( fp, "*******************************************\n" );
  fprintf( fp, "*  Phased haplotypes from DDP Haplotyper \n" );
  fprintf( fp, "*******************************************\n\n" );

  // 4. Save H: one record line plus two haplotype lines per individual
  fprintf( fp, "[H]\n" );
  int I = m_pData->m_numTotalI;
  unsigned char **pred_h[2];
  pred_h[0] = m_pData->m_Pred_Haplotypes[0];
  pred_h[1] = m_pData->m_Pred_Haplotypes[1];
  for (ii=0; ii < I; ii++)
  {
    fprintf( fp, "%d, G%d --- %d, %d	\n", ii, m_pData->m_EthnicGroup[ii],
	m_EqClassD[ii][0], m_EqClassD[ii][1] );
    for (ee=0; ee<2; ee++)
    {
      for (tt = m_nBlockStart; tt< m_nBlockEnd; tt++)
	fprintf( fp, "%d", pred_h[ee][ii][tt] );

      fprintf( fp, "\n" );
    }
  }

  fprintf( fp, "\n" );
  // 1. Save A: ancestral haplotypes with counts and relative frequencies
  fprintf( fp, "[A]\n" );
  fprintf( fp, "ID	Frequency	%%	Haplotype\n" );
  for (kk=0; kk < K; kk++)
  {
    fprintf( fp, "%d	%d	%.6f	", kk, m_NumClassQ[kk], m_NumClassQ[kk]/(float)(2*I) );
    for (tt = 0; tt< m_nBlockLength; tt++)
    {
      fprintf( fp, "%d", m_A[kk][tt] );
    }
    fprintf( fp, "\n" );
  }

  fclose(fp);
  return 1;
}

//TODO original function was 
//int DDP::Sample_Conparam( int numiter_a, int numiter_b, int *alpha )
// Resample the DP concentration parameters m_alpha (child DP) and m_gamma
// (master DP) with the auxiliary-variable scheme of Escobar & West (1995):
// draw x ~ Beta(conc+1, n), a Bernoulli indicator z, then
// conc ~ Gamma(a + #occupied - z, b - log x).
// numiter_a / numiter_b are the number of refresh iterations for each.
int DDP::Sample_Conparam( int numiter_a, int numiter_b )
{
  int iter, nd, zz, kk;
  double aa, bb, xx;
  double alpha, gamma;

  int K = m_A.size();
  alpha = m_alpha;

  // --- child-DP concentration (m_alpha) ---
  for ( iter = 0 ; iter < numiter_a ; iter++ )
  {
    aa = alpha_a;
    bb = alpha_b;
    //for ( jj = 0 ; jj < J ; jj++ )
    //{
    //nd = m_pDataIndex.size();
    // nd = number of data items under this DP
    nd =  m_pData->m_numTotalI;
    xx = randbeta( alpha + 1.0, nd );
    // mixture indicator: zz = 1 with probability nd / (alpha + nd)
    zz = ( drand48() * (alpha + nd) < nd );
    // numTable_jj:
    // aa accumulates the number of occupied classes ("tables")
    for (kk=0; kk < K; kk++)
    {
      if ( m_NumClassQ[kk] > 0 )
	aa++;
    }
    aa -= zz;
    bb -= log(xx);
    //}
    alpha = randgamma(aa) / bb;
  }
  //for ( jj=0; jj < J; jj++)
  //m_dp[jj].m_alpha = alpha;
  m_alpha = alpha;

  // --- master-DP concentration (m_gamma) ---
  gamma = m_gamma;
  for ( iter = 0 ; iter < numiter_b ; iter++ )
  {
    aa = gamma_a;
    bb = gamma_b;

    // nd = total class count; aa counts the occupied colors
    nd = 0;
    for (kk=0; kk < K; kk++)
    {
      if ( m_NumClassQ[kk] > 0 )
      {
	aa++;
      }
      nd += m_NumClassQ[kk];
    }
    xx = randbeta( gamma + 1.0, nd);
    zz = ( drand48() * (gamma + nd) < nd );

    aa -= zz;
    bb -= log(xx);

    gamma = randgamma(aa) / bb;
  }
  m_gamma = gamma;

  return 1;
}

// Update m_A, m_NumClassN from the predicted h
// Start of Ligation process
// and other SS does not have meaning anymore
//
// Rebuilds the distinct-haplotype table m_B, resets the per-class counts
// m_NumClassP (to 0 for each class) and records every individual's class
// index in m_EqClassD, all from the predicted haplotypes h.
// FIX: removed the dead local 'm_bLigationStep' (shadow-named like a member,
// never read) and the unused loop variable 'jj'.
int DDP::GetPredFreq( unsigned char*** h )
{
  int ii, ee, tt, k;

  m_B.clear();
  //m_nSumClassN.clear();
  m_NumClassP.clear();

  int I = m_pData->m_numTotalI;

  // lazily allocate the class-assignment matrix on first use
  if ( m_EqClassD == 0 )
  {
    m_pData->Alloc2DMemory( &m_EqClassD, I, 2);
  }

  int		nstart = m_nBlockStart;
  int		nend = m_nBlockEnd;
  int		nlength = nend - nstart;

  for (ii=0; ii<I; ii++)
  {
    for (ee=0; ee<2; ee++)
    {
      // look this haplotype up among the classes registered so far
      k = Find( &( h[ee][ii][nstart] ), m_B );
      if ( k < 0 )
      {
	// unseen haplotype: register it as a new class with count 0
	vector<unsigned char> hnew;
	for (tt=0; tt<nlength; tt++)
	  hnew.push_back( h[ee][ii][nstart+tt] );

	m_B.push_back( hnew );
	m_NumClassP.push_back(0);
	k = m_B.size() - 1;
      }
      m_EqClassD[ii][ee] = k;
    }
  }

  return 1;
}

//find class k which is equal to H
// Linear scan of the class table A: return the index of the first class
// whose haplotype equals h over the block (skipping index exceptK),
// or -1 if no class matches.
int DDP::Find( unsigned char *h, vector<vector<unsigned char> > &A,
    int exceptK  )
{
  if ( A.size() == 0 )
    return -1;

  int T = A[0].size();   // block length (all rows have equal length)
  int K = A.size();

  for (int kk = 0; kk < K; kk++)
  {
    if ( kk == exceptK )
      continue;

    bool match = true;
    for (int tt = 0; tt < T; tt++)
    {
      if ( h[tt] != A[kk][tt] )
      {
	match = false;
	break;
      }
    }
    if ( match )
      return kk;
  }

  return -1;
}


int DDP::Swap( unsigned char *h1, unsigned char *h2, int nstart, int nend )
{
  // Exchange the two haplotypes site-by-site over [nstart, nend).
  for (int tt = nstart; tt < nend; tt++)
    std::swap( h1[tt], h2[tt] );

  return 1;
}

int DDP::ResetCumH()
{
  // Zero the cumulative haplotype counters over the current block for every
  // individual, both alleles (ee) and all B count planes (bb).
  int I = m_pData->m_numTotalI;

  for (int bb = 0; bb < B; bb++)
  {
    for (int ee = 0; ee < 2; ee++)
    {
      int **cum = m_pData->m_Cum_Haplotypes[bb][ee];
      for (int ii = 0; ii < I; ii++)
      {
	for (int tt = m_nBlockStart; tt < m_nBlockEnd; tt++)
	  cum[ii][tt] = 0;
      }
    }
  }

  return 1;
}


//TODO change it
// Initialize the sampler for block [offset, offset+T):
//  - seeds the working haplotypes from h0 (optionally randomizing which of
//    the two seed alleles goes to which strand when cpShiftedRand is set),
//  - rebuilds the genotype/haplotype agreement statistics
//    (g_match / g_miss1 / g_miss2),
//  - runs one initial assignment sweep (Sample_C, Sample_D_Init, Sample_B,
//    Sample_H) over a random permutation of the individuals.
// Always returns 1.
int DDP::Initialize( haplo2_t h0, int I, int T, int offset, bool cpShiftedRand )
{
  int		ii, tt, ee, it, cc;
  int		nGroups = m_pData->m_nGroups;
  bool	new_class;

  // Geno-Haplo data
  assert( offset + T <= m_pData->m_numT );

  // Geno-Haplo data
  m_nBlockStart = offset;
  m_nBlockEnd = T + offset;
  m_nBlockLength = T;

  int nstart = offset;
  int nend = T + offset;

  int		numBlockT = m_nBlockLength;

  //  constants
  // (normalizers / log-constants for the Beta-Bernoulli mutation and
  //  observation models; tiny guards log(0))
  ab_h = beta_h + alpha_h;
  a1 = ( alpha_h ) / (ab_h );
  b1 = ( beta_h ) / (ab_h );
  ab_g = beta_g + alpha_g;
  a2	= (alpha_g) / (ab_g);
  b2	= (beta_g) / (ab_g);
  logB1 = log(B-1);
  logB = log(B);
  log_mu1=log(mu1);
  log_mu2=log(mu2);
  tiny = pow(10, -100); //10^(-100);

  unsigned char ***h = m_pData->m_Haplotypes;
  unsigned char ***g = m_pData->m_Genotypes;
  int	**g_match = m_pData->m_g_match;
  int	**g_miss1 = m_pData->m_g_miss1;
  int	**g_miss2 = m_pData->m_g_miss2;
  int	***h_count = m_pData->m_h_count;
  int jj =0; // d[0][0] =0

  vector<int> &q = m_NumClassQ;
  //vector<int> &sum_mj = m_nSumClassN;

  //////////////////////////
  // clear previous record
  clearSS();
  //////////////////////////

  // initialize with 1 class
  AddClass( 0, numBlockT, 0);

  //govind
  ////////////////////////////////////////////////
  //m_dp.clear();
  // create DP: in case of known ethnic group variable
  //for ( jj = 0; jj < nGroups; jj++ )
  //{
  //	m_dp.push_back( *(new DP()) );
  //}

  // initialize DP ethnic groups
  //for ( jj = 0; jj < nGroups; jj++ )
  //{
  //	m_dp[jj].m_pDataIndex = m_pData->m_DataInGroup[jj];
  //	m_dp[jj].m_AddClass( 0, numBlockT );
  //}

  // NOTE(review): only rows 0 and 1 of m_EqClassD are deleted here; if
  // Alloc2DMemory allocates each of the I rows separately this leaks the
  // remaining rows — verify against Alloc2DMemory's allocation scheme.
  if ( m_EqClassD != 0 )
  {
    delete [] m_EqClassD[0];
    delete [] m_EqClassD[1];
    delete [] m_EqClassD;
  }
  m_pData->Alloc2DMemory( &m_EqClassD, I, 2);
  int K = m_NumClassQ.size();
  m_pData->Alloc2DMemory( &m_NumClassM, m_A.size(), numBlockT );

  // initial assignment
  for ( ii = 0; ii < I ; ii++ )
  {
    if ( cpShiftedRand )
    {
      // randomly decide which of the two seed alleles becomes strand 0
      for ( tt = nstart; tt < nend; tt++)
      {
	float aa = rand()/(float)RAND_MAX;
	int aa1 = 0;
	if ( aa > 0.5 )		aa1 = 1;

	h[0][ii][tt] = h0[aa1][ii][tt];
	h[1][ii][tt] = h0[1-aa1][ii][tt];
	// 255 marks a missing value in the seed; reset both strands to 0
	if ( h[0][ii][tt] == 255 )
	{
	  h[0][ii][tt] = 0;
	  h[1][ii][tt] = 0;
	}
      }
    }
    else
    {
      // init h
      // note: here h0 is indexed relative to the block start (tt - nstart)
      for (tt = nstart; tt < nend; tt++)
      {
	h[0][ii][tt] = h0[0][ii][tt - nstart];
	h[1][ii][tt] = h0[1][ii][tt - nstart];
	if ( h[0][ii][tt] == 255 )
	{
	  h[0][ii][tt] = 0;
	  h[1][ii][tt] = 0;
	}
      }
    }

    // init g_match, g_miss1, g_miss2
    // (compare the unordered genotype pair with the unordered haplotype pair:
    //  both equal -> match, both different -> miss2, exactly one -> miss1)
    for (tt = nstart; tt < nend; tt++)
    {
      int g_id0 = MIN( g[0][ii][tt], g[1][ii][tt] );
      int g_id1 = MAX( g[0][ii][tt], g[1][ii][tt] );
      int h_id0 = MIN( h[0][ii][tt], h[1][ii][tt] );
      int h_id1 = MAX( h[0][ii][tt], h[1][ii][tt] );

      if ( g_id0 == h_id0 && g_id1 == h_id1 )
      {
	g_match[ii][tt] = 1;
	g_miss1[ii][tt] = g_miss2[ii][tt] = 0;
      }
      else if ( g_id0 != h_id0 && g_id1 != h_id1 )
      {
	g_miss2[ii][tt] = 1;
	g_miss1[ii][tt] = g_match[ii][tt] = 0;
      }
      else
      {
	g_miss1[ii][tt] = 1;
	g_miss2[ii][tt] = g_match[ii][tt] = 0;
      }
    }
  }

  // calculate u[0:2] for each DP
  CalNumClassU( );

  // rand permutation of ii
  vector<int> iivec( I );
  for ( ii = 0; ii < I; ii++ )	iivec[ii] = ii;
  random_shuffle( iivec.begin(), iivec.end() );


  // initialization other variables
  vector<int> &c = m_EqClassC;
  int ** d = m_EqClassD;
  for ( it = 0; it < I ; it++ )
  {
    for ( ee = 0; ee < 2; ee++ )
    {
      ii = iivec[it];

      vector<int> 	&p	= m_NumClassP; //upper leve;
      int **m	= m_NumClassM; //upper leve;
      vector<vector<int> >	&l	= m_NumClassL; //lower level 
      vector<vector<int> >	*la = m_NumClassLA;
      int			*u	= m_NumClassU;
      int J = m_NumClassP.size();


      double alpha0 = 0.7;

      //////////////////////////////////
      ///(2) 1. Sample c(i,e) DP
      //bad 
      //vector<unsigned char> 	&bj = m_B[jj];
      // NOTE(review): this 'cc' shadows the outer declaration, and both
      // m_B[jj] and c[jj] below use the 'jj' sampled in the PREVIOUS
      // iteration (0 on the first pass) — confirm this ordering is intended.
      int cc = Sample_C(m_B[jj], m, alpha0 ); 

      c[jj] = cc;
      q[cc]++; // the update is not delayed because cc cannot change. 
      /// 1. Sample d(i,e)
      jj = Sample_D_Init( h[ee][ii], l, alpha0);
      d[ii][ee] = jj;

      if ( jj < J )
      {
	// existing class: bump its occupancy
	p[ jj ] ++;
	//sum_mj[ cc ] ++;
	new_class = 0;

      }
      else
      {
	new_class = 1;
	// add class
	AddClass( 1, numBlockT, cc );	// top-level
	J++;
      } 

      // update LA
      // (temporarily add this haplotype's alleles to class jj's counts so
      //  Sample_B sees them; removed again after the call)
      for ( tt = nstart; tt < nend; tt++ )
      {
	la[ h[ee][ii][tt] ][jj][ tt-nstart ] ++;
      }

      m_B[jj] = Sample_B(new_class, m_B[jj], m_A[cc],h[ee][ii],
	  q[cc], p[jj], l[jj], la[0][jj], la[1][jj],
	  m[cc],  h_count[ee][ii],
	  m_b_count[jj], u, I);

      J = m_B.size();

      // Check the logic of m' the ss for theta probably some mistake in -1 or +1 when calculating P_C_AB


      //
      for ( tt = nstart; tt < nend; tt++)
      {
	la[ h[ee][ii][tt] ][jj][ tt-nstart ] --;
      }

      ////////

      // sample the haplotype "h"
      Sample_H( h[ee][ii], h[1-ee][ii],
	  g[0][ii], g[1][ii], m_B[jj],
	  g_match[ii], g_miss1[ii], g_miss2[ii],
	  p[jj], l[jj], la[0][jj], la[1][jj],
	  h_count[ee][ii],
	  u, I);
    }	// end of e

  }	// end of i

  return 1;
}

int DDP::InferHaplotypes( GenoHaploDB* pDB, int nstart, int nend )
{
  // Dispatch: adaptive convergence-checked sampling (default), or a fixed
  // burn-in + accumulation schedule when convergence checking is disabled.
  if ( m_bCheckConvg )
  {
    InferHaplotypesAdaptive( pDB, nstart, nend );
    return 1;
  }

  Initialize( pDB, nstart, nend );
  Iterate_det_Gibbs_Met( m_nBurninIteration );  // burn-in
  Iterate_cum_Gibbs_Met( m_nCumIteration );     // accumulation

  return 1;
}

// Run burn-in, then repeat accumulation rounds of `perIter` sweeps until the
// predicted haplotypes stabilize: convergence is declared when fewer than 1%
// of sites differ from the previous round's prediction twice in a row (the
// comparison allows a global phase swap), or after Maxiter rounds.
int DDP::InferHaplotypesAdaptive( GenoHaploDB* pDB, int nstart, int nend )
{
  int ii, tt;

  /// 1. init
  Initialize( pDB, nstart, nend );

  /// 2. burnin iteration
  Iterate_det_Gibbs_Met( m_nBurninIteration );

  int I = m_pData->m_numTotalI;

  // each convergence-check round runs perIter sweeps
  int perIter = MIN( 200*m_nThining, m_nCumIteration );
  int Maxiter = MAX( (int) m_nCumIteration / perIter, 1 );
  m_traceDiff.clear();

  /// 3. first cum iteration
  ////   - will do multiple cum iterations until convergence
  Iterate_cum_Gibbs_Met( perIter );

  unsigned char **pred_h[2];
  pred_h[0] = m_pData->m_Pred_Haplotypes[0];
  pred_h[1] = m_pData->m_Pred_Haplotypes[1];
  unsigned char **predh_old[2];
  // NOTE(review): predh_old is allocated here and never freed in this
  // function — a leak unless Alloc2DMemory tracks ownership; verify.
  m_pData->Alloc2DMemory( &(predh_old[0]), I, m_nBlockLength );
  m_pData->Alloc2DMemory( &(predh_old[1]), I, m_nBlockLength );

  // snapshot the current prediction as the comparison baseline
  for (ii=0; ii<I; ii++)
  {
    memcpy( predh_old[0][ii], &(pred_h[0][ii][nstart]),
	sizeof(unsigned char)*m_nBlockLength );
    memcpy( predh_old[1][ii], &(pred_h[1][ii][nstart]),
	sizeof(unsigned char)*m_nBlockLength );
  }

  int nTotSite = I*m_nBlockLength*2;
  float thr = nTotSite * 0.01;		// set threshold for convergence check
  int nConvBl = 0, iter = 0;
  ////    - repeat cum iteration until convergence
  while ( iter++ < Maxiter )
  {
    Iterate_cum_Gibbs_Met( perIter );
    Sample_Pred();
    // mat1 counts agreements in the same phase orientation,
    // mat2 in the swapped orientation; the better of the two is used so a
    // global phase flip does not count as a difference
    int mat1=0, mat2=0;
    for ( ii=0; ii<I; ii++)
    {
      for ( tt = nstart; tt < nend ; tt++ )
      {
	if ( pred_h[0][ii][tt] == predh_old[0][ii][tt-nstart] )
	  mat1++;
	if ( pred_h[1][ii][tt] == predh_old[1][ii][tt-nstart] )
	  mat1++;
	if ( pred_h[0][ii][tt] == predh_old[1][ii][tt-nstart] )
	  mat2++;
	if ( pred_h[1][ii][tt] == predh_old[0][ii][tt-nstart] )
	  mat2++;
      }
    }
    int diff = nTotSite - MAX( mat1, mat2 );

    // converged if below threshold twice
    if ( diff < thr )
    {
      if ( nConvBl == 0 ) nConvBl++;
      else break;
    }
    else
    {
      if ( nConvBl == 1 ) nConvBl = 0;
    }
    // copy old value
    for (ii=0; ii<I; ii++)
    {
      memcpy( predh_old[0][ii], &(pred_h[0][ii][nstart]),
	  sizeof(unsigned char)*m_nBlockLength );
      memcpy( predh_old[1][ii], &(pred_h[1][ii][nstart]),
	  sizeof(unsigned char)*m_nBlockLength );
    }
  }

  // compute predictive estimation
  Sample_Pred();

  return 1;
}

int DDP::InferHaplotypes(haplo2_t h0, int* popLabel,
    int I, int T, int offset, bool* bDone )
{
  // Run one full sampling pass on a block seeded with h0, forcing
  // hyper-parameter sampling on for the duration, then restore the
  // previous gamma / conparam settings.
  Initialize( h0, I, T, offset );

  float savedGamma = m_gamma;
  int savedDoCP = m_doConparam;
  m_doConparam = 1;   // always sample hyper parameters during this pass

  Iterate_det_Gibbs_Met( m_nBurninIteration, bDone );  // burn-in
  Iterate_cum_Gibbs_Met( m_nCumIteration, bDone );     // accumulation

  m_gamma = savedGamma;
  m_doConparam = savedDoCP;

  return 1;
}

///////////////// estimate mutation rate ////////////////
int DDP::EstimateTheta( int iter )
{
  // Compute the current mutation-rate estimate and, when a trace file is
  // open, append the mean, the per-site thetas, and the number of populated
  // classes for iteration `iter`.
  vector<float> theta;
  int K = m_A.size();

  vector<int> &nn = m_NumClassQ;
  int  **m = m_NumClassM;
  int T = m_A[0].size();

  float meanTheta = m_pData->EstimatedTheta( nn, m, theta, T );

  if ( m_pFPTheta == 0 )
    return 1;

  fprintf( m_pFPTheta, "%d %.4f	", iter, meanTheta );
  for (int tt = 0; tt < T; tt++)
    fprintf( m_pFPTheta, "%.4f	", theta[tt] );

  // count classes that currently have members
  int numPopK = 0;
  for (int kk = 0; kk < K; kk++)
  {
    if ( nn[kk] > 0 )
      numPopK ++;
  }
  fprintf( m_pFPTheta, "%d %d\n", numPopK, K );

  return 1;
}


// Stub loader: reads the file line-by-line.  Returns 0 if the file cannot
// be opened, 1 otherwise.
// BUG FIX: the original body contained an empty 'for (t=0; ;t++) {}' loop
// with no exit condition, which hung forever on the first line; it has been
// removed.  The fopen result and each fgets call are now checked.
int DDP::LoadData(const char *filename)
{
  FILE *fp = fopen( filename, "r" );
  if ( fp == 0 )
    return 0;

  char oneline[5000];
  while ( !feof(fp) )
  {
    if ( fgets( oneline, 5000, fp ) == 0 )
      break;
    // TODO: parse 'oneline' — the original implementation was never finished.
  }

  fclose(fp);
  return 1;
}


//int DDP::DeleteSS()
//{
//m_NumClassL.clear();
//m_NumClassLA[0].clear();
//m_NumClassLA[1].clear();

//return 1;
//}

int DDP::DeleteClass()
{
  // Remove the most recently added class from every per-class container,
  // keeping them all the same length.
  assert ( m_NumClassP.size() > 0 );

  int prevSize = m_NumClassL.size();   // for the consistency check below

  m_NumClassP.pop_back();
  m_Remove_Class.pop_back();
  m_B.pop_back();
  m_NumClassL.pop_back();
  m_NumClassLA[0].pop_back();
  m_NumClassLA[1].pop_back();
  m_EqClassC.pop_back();
  m_b_count.pop_back();

  assert(prevSize == m_NumClassL.size() + 1);
  cout << "2467: old size m_NumClassL"<< prevSize<< "new " << m_NumClassL.size() << endl; 

  return 1;
}


//////////////////////////////////////////////Disease block///////


////////////////////////////////////////////////////////////////////
////Sample C
//changes
///old: new
//l : m
//n : q
//h:b
///////////////////////////////////////////////////////////////////

// this will not be used
//int DDP::Sample_C_Init( vector <unsigned char>  &bj, 
//vector<vector<int> > &m, double alpha0)
//{
//int cc, kk;
//// q = number of bad ancestor for a given good ancestor
//vector<int> &q = m_NumClassQ;
//int K = q.size();
////double a_nsumg = alpha0 / ( sum( n ) + m_gamma ) ;

//// DP_vec stores discrete prob dist of phi's (each \phi denotes random variable associated with the color varying from 1..K+1, K+1 is new color, it is associated with lower urn) //sum(DP_Vec) = 1
//// alpha0 = tau
//vector<double>	DP_vec;

////this is proababilty of choosing old color.
//for (kk=0; kk < J; kk++)
//{
//DP_vec.push_back( q[kk] + alpha0 );
//}
////there is no probability for choosing new color, this implies
////no new good ancestor can be chosen because of bad ancestors. 

//normalize( DP_vec );

//cc = sample_discrete( DP_vec );

//// determine whether from bottom level or from top level

//return cc;
//}

// Sample the top-level color c for a class whose haplotype is bj.
// For each existing color kk the (unnormalized) weight is
//   (q[kk] + alpha0) * prod_t f( bj[t] | a_kk[t], m[kk][t], q[kk] )
// where q = m_NumClassQ (classes per color) and m holds per-site match
// counts.  No new color can be created here (see the removed Sample_C_Init).
// NOTE(review): the per-site factors are multiplied in probability space; for
// long blocks this can underflow to 0 before normalize() — a log-space
// formulation (as in the earlier commented-out variant) would be safer.
int DDP::Sample_C(vector <unsigned char>  &bj,
    int **m, double alpha0)
{
  vector<int> &q = m_NumClassQ;
  int K = q.size();

  // build the discrete weight of every existing color
  vector<double> weight;
  for ( int kk = 0; kk < K; kk++ )
  {
    double w = q[kk] + alpha0;
    for ( int tt = 0; tt < m_nBlockLength; tt++ )
    {
      if ( m_A[kk][tt] == bj[tt] )
      {
	w *= alpha_mut + m[kk][tt];
      }
      else
      {
	// clamp the mismatch count at 0 for empty colors
	int mismatch = ( q[kk] != 0 ) ? ( q[kk] - m[kk][tt] ) : 0;
	w *= beta_mut + mismatch;
      }
    }
    weight.push_back( w );
  }

  normalize( weight );

  // debug trace; 'x == x' fails only for NaN
  for ( int ii = 0; ii < (int)weight.size(); ii++ )
  {
    cout<< weight[ii]<<":" ;
    assert( weight[ii] == weight[ii] );
  }
  cout <<endl;

  int cc = sample_discrete( weight );
  cout << "cc: " << cc <<endl;

  return cc;
}

///there is no requirement of sampling A. As this is fixed. 
//It needs to read each time. 

//////////////////////////////////////////////////////
//	Sample B
//		h: haplotype to be sampled
//		h1: complementary haplotype to h
//		g0, g1: genotypes
//		ak: ancestral haplotype for h to be inherited
//		g_match, g_miss1, g_miss2: ss for H->G observation
//		mk, lk, lak0, lak1, h_count, u: ss
//		Ij: the number of individuals of group j
//
//		Changes
//		 h:b 
//               h1: Deleted
//               g0,g1 : h, deleted
//               ak: 
//		mk: qk
//		lk: lk
//		lak0, lak1:lak0 lak1 ..... ss for phi (b,d,h)
//		h_count, TODO 
//               Ij:
//               nk:qk
//
//
//               ss for theta: m
//               ss for phi: l
//
//               extra: mk: sufficient statistics for theta(a,c) 
//
// TODO remember to remove contribution of b from ss
// TODO change earlier B to bb
// see the
//
// laj[0][tt] = \sum_i  I(h_it = 0) I(d_i =j)
///////////////////////////////////////////////////////
vector<unsigned char> DDP::Sample_B(bool new_class,
    vector<unsigned char>  &bj,
    vector<unsigned char>  &ak,
    unsigned char  *hj,
    int qk, int pj, vector<int> &lj, vector<int> &laj0, vector<int> &laj1,
    int *mk,
    int *h_count, vector<int> &b_count,
    int *u, int I)
{
  // Samples a "b" haplotype for one class, site by site over the current
  // block (m_nBlockLength sites).
  //
  //   new_class == false : resample b for an existing class; the H|B term
  //                        integrates the beta posterior given the group's
  //                        per-site allele counts laj0/laj1 (pj haplotypes).
  //   new_class == true  : draw b for a brand-new class; only the single
  //                        haplotype hj informs the H|B term.
  //
  // In both cases the prior P(A|B) is a mutation model around the ancestral
  // haplotype ak with pseudo-counts (alpha_mut, beta_mut) and class size qk.
  //
  // Side effects: b_count[tt] is set to I(ak[tt]==bj[tt]) and mk[tt] is
  // incremented by it (sufficient statistics for theta).
  // Returns: the newly sampled haplotype, one byte (0/1) per site.
  int numBlockT = m_nBlockLength;
  vector<unsigned char> temp_bj;
  temp_bj.reserve(numBlockT);

  for (int tt = 0; tt < numBlockT; tt++) {
    // BUG FIX: pB_AH used to be declared once outside this loop and was
    // never cleared, so from the second site onward it accumulated stale
    // entries and sample_discrete() could return indices > 1, which were
    // then stored as allele values. It now holds exactly the two allele
    // probabilities for the current site.
    vector<double> pB_AH;

    // Match indicator between the current b and the ancestor a at this site.
    // (Previously computed identically, with a shadowed declaration, in
    // both branches below; hoisted here once.)
    int mdelb = (bj[tt] == ak[tt]) ? 1 : 0;

    // P(A|B) under the mutation model, oriented by the ancestral allele.
    double log_pA_B0, log_pA_B1;
    if (ak[tt] == '0') {
      log_pA_B0 = log(alpha_mut + mdelb + 1);
      log_pA_B1 = log(beta_mut + qk - mdelb);
    }
    else {
      log_pA_B1 = log(alpha_mut + mdelb + 1);
      log_pA_B0 = log(beta_mut + qk - mdelb);
    }

    double log_pB0_H, log_pB1_H;
    if (!new_class) {
      // Existing class: beta-binomial style term from the group counts.
      // The denominator is common to both alleles (see "old b" in the doc).
      // NOTE(review): the trailing "* logB1" looks suspicious -- a log
      // normalizer is usually subtracted, not used as a multiplier on the
      // lgamma term. logB1 is defined outside this view, so the expression
      // is kept exactly as the original; verify intent against the
      // derivation notes.
      log_pB0_H = lgamma(alpha_h + laj0[tt]) + lgamma(beta_h + pj - laj0[tt]) -
	lgamma(pj + alpha_h + beta_h) * logB1;
      log_pB1_H = lgamma(alpha_h + laj1[tt]) + lgamma(beta_h + pj - laj1[tt]) -
	lgamma(pj + alpha_h + beta_h) * logB1;
    }
    else {
      // New class: only the current haplotype hj informs P(H|B).
      if (hj[tt] == '0') {
	log_pB0_H = log(alpha_h);
	log_pB1_H = log(beta_h);
      }
      else {
	log_pB0_H = log(beta_h);
	log_pB1_H = log(alpha_h);
      }
    }

    pB_AH.push_back(exp(log_pB0_H + log_pA_B0));
    pB_AH.push_back(exp(log_pB1_H + log_pA_B1));

    normalize(pB_AH);
    // BUG FIX: the new_class branch previously stored the sampled index in
    // a double; both paths now use int, matching sample_discrete's result.
    int bt = sample_discrete(pB_AH);
    // BUG FIX: the old guard "if (bt < 0 && bt < B) assert(0)" could only
    // ever fire for bt < 0 (the second clause was redundant). With exactly
    // two categories the valid range is {0, 1}.
    assert(bt == 0 || bt == 1);
    temp_bj.push_back((unsigned char)bt);
  }

  // (4) Update sufficient statistics b_count and mk.
  // NOTE(review): this compares the OLD bj against ak, not the freshly
  // sampled temp_bj -- this matches the original code exactly; confirm
  // whether temp_bj was intended here (see the TODOs in the header
  // comment about removing b's old contribution from the ss).
  for (int tt = 0; tt < numBlockT; tt++) {
    b_count[tt] = (ak[tt] == bj[tt]) ? 1 : 0;
    mk[tt] += b_count[tt];
  }

  return temp_bj;
}


// Reset the per-class count vector Q to K zero-valued entries.
void DDP::InitializeQ(int K)
{
  // assign() clears any previous contents and fills with K zeros in one
  // call -- equivalent to clear() followed by K push_back(0) calls.
  m_NumClassQ.assign(K, 0);
}