"""
@author Erik M Volz
@date Dec 30 2011
@version 0.1.02

export LD_LIBRARY_PATH=/usr/local/lib

uses fast C plugins from likhelper

Calculate the likelihood of a gene genealogy conditional on a demographic history. 

"""
__version__='0.1.02'

from pylab import *
import likhelper
import networkx, pdb, csv, time, sys, numpy, datetime
from Bio import Phylo
from cStringIO import StringIO
from scipy.integrate import odeint, ode
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d



class RunningTimeExceededLimitException(Exception):
	"""Raised when the likelihood calculation runs longer than Likelihood.max_time seconds."""
	pass
class TreeIncompatibleWithModelException(Exception):
	"""Raised when an iterative tree computation fails to converge (exceeds its iteration cap), indicating the genealogy cannot be reconciled with the model."""
	pass

class Genealogy(networkx.DiGraph):
	"""
	Genealogy(nwkstring, sampleTimes, sampleStates)
	
	Class extending networkx.DiGraph
	Contains tree topology, node heights, and branch lengths.
	Initialize with taxon metadata, sample dates, and states of taxa when sampled.
	Each node has attribute height, which is relative to most recently sampled taxon. 
	
	Parameters
	----------
	nwk : string
		Newick format describing tree topology
	sampleTimes, sampleStates : dict
		keys are taxon names corresponding to values in nwk. sampleTimes
		have values type float with date of taxon. Date can be 
		relative to any time. Dates are transformed into heights 
		relative to the most recent sample. sampleStates have values 
		type array(float). 
	nwkstrings : list
		contains strings, each of which is a Newick format tree
	
	Methods
	-------
	extant_lineages_in_interval(h0, h1) : h0 and h1 are type float, h0 < h1, representing time in past relative to most recent sample. 
		Returns list of branches from Genealogy.
	"""
	# class-level defaults; number_of_taxa is incremented once per terminal node in __init__
	number_of_taxa =0
	NSTATES = None # number of demes/states; set from the first sampleStates entry
	def __init__(self, nwkstring, sampleTimes, sampleStates):
		"""
		sampleTimes and sampleStates are dictionaries keyed by taxon names in the newick
		"""
		# infer the number of model states from an arbitrary taxon's state vector
		self.NSTATES = len(sampleStates.values()[0])
		
		super(Genealogy, self).__init__(self)
		
		self.sampleTimes = sampleTimes
		self.sampleStates = sampleStates
		self.nwk = nwkstring
		self.taxa = list() #terminal nodes
		
		#nwk > DiGraph with BioPython
		# the input string may concatenate several newick trees separated by ';'
		self.nwkstrings = nwkstrings = nwkstring.split(';')
		self.biopython_phylos = biopython_phylos= list()
		for nwk in nwkstrings:
			nwk = nwk.strip()
			
			if len(nwk)>0 and nwk!="\n":
				if nwk[-1]!=';':
					nwk+=';'
				failed = True
				try:
					biopython_phylo = Phylo.read(StringIO(nwk), "newick")
					biopython_phylos.append(biopython_phylo)
					failed = False
				except: # the newick is empty or otherwise corrupt
					print nwk
					#~ pass
					
				if not failed:
					# convert to networkx and add each edge oriented parent -> child;
					# surrounding quote characters are stripped from node names first
					digraph = Phylo.to_networkx(biopython_phylo)
					for u,v in digraph.edges():
						if u.name is not None:
							u.name = u.name.strip('\'')
						if v.name is not None:
							v.name = v.name.strip('\'')
						#
						if u.is_parent_of(v):
							self.add_edge(u,v)
						else:
							self.add_edge(v,u)
					#
				#~ self.add_edges_from(digraph.edges())
				
		# add heights, sample times , and states
		self.max_sample_time = max_sample_time = max(sampleTimes.values())
		for node in self.nodes():
			if node.is_terminal():
				self.taxa.append(node)
				self.number_of_taxa += 1
				try:
					node.sample_time = sampleTimes[node.name]
				except:
					node.sample_time = int(node.name.split('_')[-1]) # sometimes the date is encoded in the name
				node.state = sampleStates[node.name]
				assert len(node.state)==self.NSTATES
				# height = time before the most recent sample (0 for most recent taxon)
				node.height = max_sample_time - node.sample_time
		#
		# propagate heights from the tips towards the root: a parent's height is
		# the child's height plus the child's branch length; iterate until no
		# node is updated (iteration cap guards against malformed trees)
		nupdates = 1
		niters = 0
		while nupdates > 0:
			niters+=1
			if niters > 10000:
				raise TreeIncompatibleWithModelException("%i" % nupdates)
			#
			nupdates = 0
			for u,v in self.edges():
				if v.__dict__.has_key('height') and not u.__dict__.has_key('height'):
					nupdates+=1
					u.height = v.height + v.branch_length
		#
		
		# july 9 add root edges
		# give every in-degree-0 node an artificial parent clade one unit above it
		for node in self.nodes():
			if self.in_degree(node)==0:
				clades = [clade for clade in node]
				rootclade = Phylo.BaseTree.Clade( branch_length=1., name='root', clades= clades) #max_sample_time - node.height
				rootclade.height = node.height + 1 #max_sample_time #retrospectively, the beginning of the epidemic
				self.add_edge(rootclade, node)
		#
		
		
		
		# april 11 2012, nlft function
		# nlft = number of lineages as a function of time (tree height);
		# build a piecewise-linear interpolator over the sorted event heights
		coalescent_or_sample_times  = [ (parent.height, child.height) for parent,child in self.edges() if self.out_degree(parent)==2]
		coalescent_or_sample_times = self.coalescent_or_sample_times = sort( unique( ravel(coalescent_or_sample_times) )   )
		self.ctimes = sort(unique([node.height for node in self.nodes() if (not node.is_terminal()) and self.in_degree(node) > 0]))
		stvalues = self.stvalues = [taxon.height for taxon in self.taxa] #sampleTimes.values()
		nsampled = list()
		lastA =0
		A = list()
		tlist = list()
		tdelta = 1.
		for t in coalescent_or_sample_times:
			tlist += [t]
			nsampled.append(stvalues.count(t))
			# going back in time, sampling events add lineages while a
			# coalescent event removes one lineage
			if nsampled[-1] > 0:
				A += [lastA + nsampled[-1]]
			else:
				A += [lastA - 1]
			#
			lastA = A[-1]
		#
		self.nlft_interp = interp1d(tlist, A, bounds_error = False, fill_value = 1.)
		self.rootHeight = coalescent_or_sample_times[-1]
		
		# precompute a lookup table of nlft values at integer heights
		btaxis = arange(0., self.rootHeight+1)
		self.nlft_lookup = dict(zip(btaxis, self.nlft_interp(btaxis))) # 
	#
	
	def rescale_node_heights(self, h_final, A_final):#ctime_model):
		# rescales node heights to match those from model as closely as possible 
		# also update nlft functions; does not currently change branch lengths; 
		
		#b = dot(ctime_model, self.ctimes[:len(ctime_model)]) / dot(self.ctimes[:len(ctime_model)], self.ctimes[:len(ctime_model)])
		
		# find the tree height h_i at which the tree's nlft is closest to the
		# model's final lineage count A_final, scanning from the root downwards
		nlftkeys = sort(self.nlft_lookup.keys())[::-1]
		for h in nlftkeys:
			h_i = h
			if abs(self.nlft_lookup[h] - A_final) < 1:
				break
		#
		# scale factor mapping tree heights onto the model's time axis
		b = h_final / h_i
		
		
		ctimes = list()
		for node in self.nodes():
			if node.is_terminal():
				continue
			node.height *= b
			
		#
		
		print 'b', b
		
		# in rare instances parent.height < child.height; fix this: 
		done = False
		niter = 0
		while not done:
			niter +=1 
			if niter > 10000:
				raise TreeIncompatibleWithModelException("rescale_node_heights(self, h_final, A_final): %i" % niter)
			#
			done=True
			for parent, child in self.edges():
				if parent.height < child.height:
					print parent.height, child.height
					# candidate new height: reflect the parent above the child
					x = child.height + child.height - parent.height
					try:
						grandparent = self.predecessors(parent)[0]
						if grandparent.height < x and grandparent.height > child.height:
							parent.height = (child.height + grandparent.height)/2.
						else:
							parent.height = x
					except:# grandparent must be root
						# NOTE(review): 'grandparent' may be unbound here when
						# self.predecessors(parent) itself raised above --
						# potential NameError on the next line; confirm intended
						if self.rootHeight < x and self.rootHeight > child.height:
							parent.height = (child.height + grandparent.height)/2.
						else:
							parent.height = x
					done = False
			#
		#
		
		# collect the rescaled coalescent times (internal nodes with a parent edge)
		for node in self.nodes():
			if self.in_degree(node)==1:
				ctimes.append(node.height)
		#
		self.ctimes = ctimes
		self.coalescent_or_sample_times = sort(unique( self.stvalues + ctimes ))
		self.rootHeight = self.coalescent_or_sample_times[-1]
		
		# reinitialize nlft lookup functions
		lastA =0
		A = self.A_list = list()
		hlist = list()
		hdelta = 1.
		for h in self.coalescent_or_sample_times:
			hlist += [h]
			n_h = self.stvalues.count(h)
			if n_h > 0:
				A += [lastA + n_h]
			else:
				A += [lastA - 1]
			#
			lastA = A[-1]
		#
		self.nlft_interp = interp1d(hlist, A, bounds_error = False, fill_value = min(A))
		
		btaxis = arange(0., self.rootHeight+1)
		self.nlft_lookup = dict(zip(btaxis, self.nlft_interp(btaxis)))
	#
	
	
	def nlft_lookup_function(self, h):
		# number of lineages at height h from the precomputed lookup table;
		# h may be a scalar (rounded to the nearest tabulated height) or an
		# iterable (handled element-wise via the TypeError branch); heights
		# outside the table default to 1.
		try:
			return self.nlft_lookup[round(h)]
		except TypeError:
			#~ return self.nlft_lookup_function_array(h)
			return array([self.nlft_lookup_function(hh) for hh in h])
		except KeyError:
			return 1.
	#
	
	
	
	def extant_lineages_in_interval(self, t1, t2):
		# returns branch with any overlap with interval [t1, t2)
		# a branch (u,v) spans from child height v.height up to parent height u.height
		if t1 > t2:
			return list()
		#
		branches = list()
		for u,v in self.edges():
			if (u.height > t1 and u.height <= t2) or (v.height >= t1 and v.height < t2) or (u.height >= t2 and v.height <= t1):
				branches.append((u,v))
		#
		return branches
	#
#

class Likelihood(object):
	""" 
	Computes the likelihood of a gene genealogy conditional on a demographic history.
	
	Parameters
	----------
	taxis : array of length T, type float
		Gives the time corresponding to each matrix in births and migrations.
		*Must* have equally spaced values. 
	births and migrations : length T tuple of mXm arrays
	prevalence : TXm array, each row giving the population size
	genealogy : Genealogy
		each branch has keys for a number of lists: 
			heights-- the time axis for other variables 
			pik-- each element is m-array; pmf for state of branch
	
	Value
	-----
	log_likelihood : float
	lik_terms : list of type float
		Gives the likelihood of observing each internode interval. 
	
	
	Methods
	-------
	F_G_Y(t) : t float, time
		returns tuple of (mXm array type float, mXm array type float, m array type float).
		F is matrix of birth rates, G is matrix of migration rates, Y is array of population sizes. 
		These values are interpolated (linear) from the sequence of matrices provided on initialization. 
	"""
	def F_G_Y(self, t):
		# t is a tree height (time before the most recent sample); translate
		# it into the forward time axis used by the interpolators
		tt = self.mostRecentSampleTime - t
		Y = array([self.interp_prevalence[k](tt) for k in range(self.m)]).flatten()
		dF =zeros( (self.m, self.m))
		dG = zeros( (self.m, self.m) )
		# instantaneous rates are the first derivatives of the cumulative
		# birth/migration splines; only known-nonzero elements are evaluated
		for k,l in self.nonZeroElements:
			try:
				dF[k,l] = self.interp_cum_births[(k,l)].derivatives(tt)[1]
			except: # the derivative is not defined 
				dF[k,l] = 0.
			try:
				dG[k,l] = self.interp_cum_migrations[(k,l)].derivatives(tt)[1]
			except:
				dG[k,l] = 0.
		#
		
		
		# absolute values guard against small negative spline excursions
		return abs(dF), abs(dG), abs(Y)
	#
	
	
	def _state_at_internal_node_and_likelihood(self, internalnode, childnodes, birth, prev, A, S):
		""" july 20 2011
		Compute the state pmf of the lineage ancestral to internalnode and
		append the likelihood term for the coalescent event at that node.
		birth is the mXm birth-rate matrix, prev the m-vector of population
		sizes, A the m-vector of lineage counts by state, and S the survival
		term accumulated up to the node's height.
		"""
		g = self.genealogy; m = self.m
		
		c1,c2 = childnodes
		try:
			lastpik1 = abs(g[internalnode][c1]['pik'][-1])
		except:
			# NOTE(review): if this lookup fails, lastpik1 stays unbound and
			# the loop below raises NameError -- confirm intended
			pass
		lastpik2 = abs(g[internalnode][c2]['pik'][-1])
		
		# X[k,l] ~ rate that the two child lineages coalesce via a k-l birth
		X = zeros((m,m))
		lambdai = 0.
		lambdaj =0.
		Lambda =0.
		for k in range(m):
			for l in range(m):
				#~ X[k,l] = (birth[k,l]/prev[k]/prev[l]) * (lastpik1[k]*lastpik2[l] + lastpik1[l]*lastpik2[k])
				#~ lambdai = (birth[k,l]/prev[k]/prev[l]) * (lastpik1[k]*A[l])
				#~ lambdaj = (birth[k,l]/prev[k]/prev[l]) * (lastpik2[k]*A[l])
				#~ Lambda = (birth[k,l]/prev[k]/prev[l]) * (A[k]*A[l])
				if k!=l or prev[l] < 1 or prev[k]<1:
					X[k,l] = (birth[k,l]/prev[k]/prev[l]) * (lastpik1[k]*lastpik2[l] + lastpik1[l]*lastpik2[k])
					
				else:
					# same-deme coalescence with prev >= 1: use prev-1 in the
					# denominator (sampling without replacement correction)
					try:
						X[k,l] = (birth[k,l]/prev[k]/abs(prev[l]-1)) * (lastpik1[k]*lastpik2[l] + lastpik1[l]*lastpik2[k])
					except:
						X[k,l] = (birth[k,l]/prev[k]/prev[l]) * (lastpik1[k]*lastpik2[l] + lastpik1[l]*lastpik2[k])
				#
		#
		rowsumX =  abs( sum(X, axis = 1)   )
		sumX = lambdaij = abs(  sum(rowsumX) )
		
		# update likelihood
		if S < 0:
			S = 0.
		if lambdaij < 0:
			lambdaij = 0.
		lik_term11 =  abs(S)  * abs(lambdaij )
		
		self.lik_terms.append(lik_term11)
		self.lik_terms11.append(lik_term11)
		
		# march 26 2012
		# reset survival term after each coalescent event
		self.S_list.append(S)
		self.S = 1.
		
		
		if sumX==0:
			return lastpik1
		newpik = rowsumX / sumX  #pik
		return abs(newpik)
	#
	
	
	def _initialize_new_branches(self, branches, birth, prev, A, S):
		""" 
		A and S correspond to the point in time when the new branch occurs (going backwards), eg an internal node of the tree
		For each new branch, initialize its 's', 'S', 'heights' and 'pik'
		series; terminal children seed pik with the observed sample state,
		internal children seed it with the pmf computed at the coalescent event.
		"""
		g = self.genealogy; m = self.m
		for branch in branches:
			parent, child = branch
			g[parent][child]['s'] = [ 1.]
			g[parent][child]['S'] = [1.]
			g[parent][child]['heights'] = [child.height]
			if child.is_terminal():
				g[parent][child]['pik'] = list()
				g[parent][child]['pik'].append( array(child.state) )
				
				#
				# no coalescent event here, so no parent pmf to propagate
				self.p_alpha_k = None
			else:# child is internal node
				self.p_alpha_k = self._state_at_internal_node_and_likelihood(child, self.genealogy.successors(child), birth, prev, A, S)
				g[parent][child]['pik'] = [ self.p_alpha_k ]  
			#
		#
		
	#
	
	max_time = inf #6 * 60 #120 # maximum running time of likelihood calculation
	max_deltat = None #max integration step before renormalization; smaller values give more stable estimate, run longer; 
	def __init__(self, taxis, prevalence, births, migrations, genealogy, rescaleTreeHeight=True, **kwargs):
		# kwargs may override instance attributes such as max_time / max_deltat
		self.__dict__.update(**kwargs)
		self.start_time = time.time() # feb 3 2012
		
		self.log_likelihood = 0.
		m = self.m = len(prevalence[0])
		T = self.T = len(prevalence)
		n = self.n = genealogy.number_of_nodes()
		self.genealogy  = self.g = g = genealogy
		self.births     = copy(births)
		self.migrations = copy(migrations)
		self.prevalence = copy( prevalence)
		
		# march 24 2012
		self.maxTaxis = max(taxis) # used for the FGY function, translating tree heights into forward time axis
		self.mostRecentSampleTime = self.genealogy.max_sample_time  # should be corresponding day of the most recent sample on the taxis scale
		
		# march 26 2012
		nlft_k = self.nlft_k = list()
		nlft_k2 = self.nlft_k2 = list()
		S_list = self.S_list = list()
		
		#~ determine which elements are nonzero
		# collect (k,l) pairs with any birth or migration rate > 0 at any time
		birthsMigrations = [b + m for b,m in zip(births,migrations)]
		birthsMigrations =sum(birthsMigrations, axis=0)
		self.nonZeroElements = list()
		for k in range(len(birthsMigrations)):
			for l in range(len(birthsMigrations)):
				if birthsMigrations[k,l] > 0 or birthsMigrations[l,k]>0:
					self.nonZeroElements.append((k,l))
		#
		
		
		# for interpolation of births,migrations, population size: 
		# cumulative sums are splined; F_G_Y later differentiates the splines
		self.cum_births = [births[0]]
		self.cum_migrations = [migrations[0]]
		for b,m in zip(births[1:], migrations[1:]):
			self.cum_births.append( self.cum_births[-1] + b ) 
			self.cum_migrations.append( self.cum_migrations[-1] + m )
		#
		self.interp_cum_births = dict()
		self.interp_cum_migrations = dict()
		self.interp_prevalence = dict()
		for k in range(self.m):
			y = [p[k] for p in self.prevalence ]
			self.interp_prevalence[k] = InterpolatedUnivariateSpline(taxis, y)
			for l in range(self.m):
				b = [ bb[k,l] for bb in self.cum_births]
				m = [ mm[k,l] for mm in self.cum_migrations]
				self.interp_cum_births[(k,l)] = InterpolatedUnivariateSpline(taxis, b)
				self.interp_cum_migrations[(k,l)] = InterpolatedUnivariateSpline(taxis, m)
		#
		
		
		
		################################################################
		#~ april 21 2012 solve for A; rescale branch lengths so that nlft 
		#~ of model more closely matches nlft of tree; will work best for 
		#~ populations that initally grow exponentially; 
		if rescaleTreeHeight: 
			stkeys = g.sampleTimes.keys()
			sampleTimes = [g.sampleTimes[k] for k in stkeys]
			sampleStates = [g.sampleStates[k] for k in stkeys]
			sampleHeights = g.max_sample_time - array(sampleTimes)
			uSampleHeights = sort(unique(sampleHeights))
			
			# aggregate the state vectors of all taxa sampled at each height
			newSampleStates = dict.fromkeys(uSampleHeights)
			for st, sh, ss in zip(sampleTimes, sampleHeights, sampleStates):
				try:
					newSampleStates[sh] += array(ss)
				except:
					newSampleStates[sh] = zeros(self.m)
					newSampleStates[sh] += array(ss)
			#
			
			AforS = list() 
			n = g.number_of_taxa
			nn =0.
			taxaSampled_list = list()
			taxaSampled_times = list()
			for sh in sorted(uSampleHeights):
				nn+=sum(newSampleStates[sh])
				taxaSampled_list.append( nn )
				taxaSampled_times.append(sh)
			#
			taxaSampled_cumsum = cumsum(taxaSampled_list)
			def taxaToBeSampled(t):
				# number of taxa not yet sampled at height t (going backwards)
				last_ts = 0
				for tst, ts in zip(taxaSampled_times,taxaSampled_list):
					if t < tst:
						return n - last_ts
					#
					last_ts = ts
				#
				return n - last_ts
			# 
			
			# solve A 
			A0 = zeros(self.m)
			Alast = A0
			xA = self.A = [Alast]
			AforS = [ sum(newSampleStates[uSampleHeights[0]]) + taxaToBeSampled(uSampleHeights[0])  ]
			
			#~ "ddiiOOOOOOOO", &t0, &deltat, &m, &lengthNonZeroElements, &A0, &b0, &b1, &mig0, &mig1, &p0, &p1, &nonZeroElements
			A_taxis = sort( unique(concatenate((uSampleHeights , linspace(0., g.rootHeight, 100))) ) )
			
			for h0, h1 in zip(A_taxis[:-1], A_taxis[1:]): # don't forget the last interval
				A0=Alast
				if newSampleStates.has_key(h0):
					A0+=newSampleStates[h0]
				#
				t = array([h0, h1])
				#~ pdb.set_trace()
				
				F0,G0,Y0 = self.F_G_Y(t[0])
				F1,G1,Y1 = self.F_G_Y(t[-1])
				# the likhelper C plugin integrates A over [h0,h1] given the
				# rate matrices and population sizes at the two endpoints
				Alast = likhelper.solveA(t[0], t[-1]-t[0], len(A0), len(self.nonZeroElements), A0, F0, F1, G0, G1, Y0, Y1, array(self.nonZeroElements, dtype=float))
				
				#~ Atest.append(Alast)
				AforS.append( sum(Alast) + taxaToBeSampled(h1)  )
			#
			
			
			# make interpolators
			self.A_interp = interp1d(A_taxis, AforS, fill_value = 1, bounds_error = False )
			linesRemaining = self.linesRemaining= sum(AforS[-1])
			
			# rescale branch lengths (actually node heights); OLS with zero intercept,times to each coal;
			h_A_interp = interp1d(AforS, A_taxis, bounds_error = False, fill_value=max(A_taxis))
			ctime_model = interp(range(int(max(AforS))-1, int(min(AforS))-1, -1), AforS[::-1], A_taxis[::-1])
			self.genealogy.rescale_node_heights(A_taxis[-1], self.linesRemaining)#ctime_model)
		################################################################
		
		
		# reorganize variables for reverse time axis
		r_t_i = argmin(abs(g.max_sample_time-taxis)) + 1
		r_t = height = self.height = self.heights =  taxis[:r_t_i]
		# should always inlude zero aligned with g.max_sample_time
		
		self.lik_terms = list()
		self.lik_terms11 = list()
		
		coalescent_or_sample_times = [ (parent.height, child.height) for parent,child in g.edges() ] 
		coalescent_or_sample_times = unique( ravel(coalescent_or_sample_times) )  
		
		#~ print 'print len(coalescent_or_sample_times)'
		#~ print len(coalescent_or_sample_times)
		
		A = zeros(self.m)
		self.S = 1. 
		
		mrca_reached = False
		
		# march through internode intervals from the tips towards the root,
		# initializing new lineages at each event height and integrating
		# branch state pmfs across each interval
		for i, h0, h1 in zip(range(len(coalescent_or_sample_times)-1), coalescent_or_sample_times[:-1], coalescent_or_sample_times[1:]):
			#~ feb 3 2012
			if (time.time() - self.start_time) > self.max_time:
				#~ abort bc is taking too long; unlikely to be a good solution
				raise RunningTimeExceededLimitException(r"for i, h0, h1 in zip(range(len(coalescent_or_sample_times)-1), coalescent_or_sample_times[:-1], coalescent_or_sample_times[1:]):")
			#
			branches = g.extant_lineages_in_interval(h0, h1)
			
			if len(self.lik_terms)==g.number_of_nodes()-1 and len(branches)==1: 
				mrca_reached = True
				break 
			#
			
			F0,G0,Y0 = self.F_G_Y(h0)
			
			
			newbranches =  [branch for branch in branches if branch[1].height == h0] 
			self._initialize_new_branches(newbranches, F0, Y0, A, self.S)
			
			# update other branches at node; finite size corrections; T# update other branches at node; finite size corrections;  needs p_alpha_k
			if self.p_alpha_k is not None: 
				oldbranches = [branch for branch in branches if branch[1].height!=h0]
				self._update_old_branches(oldbranches, Y0)
			
			status2 , A, self.S = self._update_branches(branches,  h0, h1)
			if self.n_lineages_exceeds_population_size:
				break
			if status2 < 0:
				break # less than two extant lineages or population size less than nlft
			#
			
			# record A at height h
			nlft_k.append((h1,A))
		#
		
		# assemble the log likelihood; any numerical failure maps to -inf
		if self.n_lineages_exceeds_population_size:
			self.log_likelihood = -inf
		else:
			try:
				self.log_likelihood =  sum(log(self.lik_terms))
				if self.log_likelihood==0: # will happen if lik_terms==[]
					self.log_likelihood = -inf
			except:
				self.log_likelihood = -inf
		#
		if isnan(self.log_likelihood):
			self.log_likelihood = -inf
	#
	
	def _update_old_branches(self, oldbranches, Y0):
		# finite-size correction: adjust the state pmf of lineages that were
		# already extant when a coalescent event occurred elsewhere, using
		# p_alpha_k (the pmf of the new parent lineage) and population sizes Y0
		for parent,child in oldbranches:
			p_i = self.g[parent][child]['pik'][-1]
			fterm = self.p_alpha_k * p_i * (Y0-1)/(Y0-p_i)
			stermmat = repeat(self.p_alpha_k * Y0 / (Y0-p_i), self.m).reshape((self.m, self.m)).T
			stermmat[ diag_indices_from(stermmat) ] = 0.
			sterm = p_i * sum(stermmat, axis=1)
			self.g[parent][child]['pik'][-1] = fterm + sterm
	#
	
	# set when total lineage mass exceeds total population size (tree incompatible with model)
	n_lineages_exceeds_population_size= False
	def _update_branches(self, branches, lower_h, upper_h):
		# updates state of branches at height upper_h
		# integrates (A, Q, S) over [lower_h, upper_h] with the likhelper C
		# plugin, then applies the transition matrix Q to each branch's pmf
		g = self.genealogy; m = self.m
		
		self.nbranches = nbranches = len(branches)
		
		if nbranches < 1:
			return -2, 1, 1
		#
		
		# make initial conditions
		# A0 = summed state pmfs of extant branches; Q0 = identity; S0 carried over
		A0 =zeros(self.m)
		for branch in branches:
			parent, child = branch
			pik = g[parent][child]['pik'][-1]
			A0+=pik
		#
		
		Q0 = eye(self.m)
		S0 = self.S
		AQS0 = concatenate((A0, ravel(Q0), [S0] ))
		x0 = copy(AQS0)
		
		
		# make solution intervals; each interval less than self.max_deltat
		#~ smaller deltat will give more stable solution; will take longer;
		if self.max_deltat is not None:
			t = linspace(lower_h, upper_h, 2 + floor((upper_h-lower_h)/self.max_deltat))
		else:
			t = linspace(lower_h, upper_h, 2 )
		
		for h0,h1 in zip(t[:-1], t[1:]):
			F0,G0,Y0 = self.F_G_Y(h0)
			F1,G1,Y1 = self.F_G_Y(h1)
			AQS1 = likhelper.solveAQS(h0, h1-h0, self.m, len(self.nonZeroElements), AQS0, F0, F1, G0, G1, Y0, Y1, array(self.nonZeroElements, dtype=float))
			AQS0 = AQS1
		#
		
		# unpack the flat solution vector: A (m), Q (mXm), S (scalar)
		A1 = AQS1[:self.m]
		Q = AQS1[self.m:(self.m+self.m**2)].reshape((self.m, self.m))
		S1 = AQS1[-1]
		#~ c_end = time.time()
		
		# renormalization step to prevent numerical instabilities
		Q = abs(Q)
		rowsumQ = sum(Q, axis = 1)
		Q = Q / rowsumQ.reshape((len(Q),1))
		
		#update state of branches
		for i, branch in enumerate(branches):
			parent, child = branch
			oldpik = g[parent][child]['pik'][-1]
			newpik = abs( dot( Q.T , oldpik) )
			newpik = newpik / sum(newpik)
			
			g[parent][child]['pik'].append(newpik)
			g[parent][child]['heights'].append(upper_h)
			
			g[parent][child]['s'].append(1)
			g[parent][child]['S'].append(S1)
		#
		
		# recompute total lineage mass and sanity-check against population size
		A1p =zeros(self.m)
		for branch in branches:
			parent, child = branch
			pik = g[parent][child]['pik'][-1]
			A1p+=pik
		
		if sum( A1p ) > sum( Y1 ) > 0:
			print 'Warning: lineages exceed population size.', A1p, Y1 
			self.n_lineages_exceeds_population_size = True
			return -1, A1p, abs(S1)
		#
		return 0, A1p, abs(S1)
	#
	
	#~ now handled in likhelper:
	#~ def qdot(self, x, t): 
#

def load_flatfile_TXmXm( fn):
	"""Read a csv flatfile from fn with dimensions (m*T)Xm.

	Blank lines (optional separators between the T matrices) are skipped.
	m is inferred from the width of the first row.

	Parameters
	----------
	fn : str
		Path to the csv file.

	Returns
	-------
	list of length T containing mXm arrays of type float.
	"""
	# read all non-blank rows; 'with' guarantees the file handle is closed
	with open(fn, 'r') as f:
		rows = [row for row in csv.reader(f) if len(row) > 0]
	m = len(rows[0])
	# partition the (m*T) rows into T consecutive mXm float matrices;
	# floor division keeps the row count an integer under any Python version
	return [array(rows[i * m:(i + 1) * m], dtype=float) for i in range(len(rows) // m)]
#

def load_births( fn):# ie transmissions
	"""Read the birth (transmission) rate matrices from fn.

	Produces a length T list of mXm arrays;
	reads csv flatfile from fn with dimensions (m*T)Xm;
	optionally, matrices can be separated by blank lines.
	"""
	return  load_flatfile_TXmXm(fn)
#
def load_migrations( fn):# ie stage transitions
	"""Read the migration (stage transition) rate matrices from fn;
	same (m*T)Xm csv input format and return value as load_births."""
	return load_flatfile_TXmXm(fn)
#
def load_taxis(fn):# single column of data, length T
	"""Load the time axis from fn: a single comma-delimited column of T floats."""
	taxis = numpy.loadtxt(fn, delimiter=',')
	return taxis
#
def load_prevalence( fn):
	"""Read the prevalence table from fn.

	The file is csv with dimensions TXm, each row giving the population
	size of each of the m demes at one time point; blank lines are skipped.

	Returns
	-------
	TXm array of type float.
	"""
	# 'with' guarantees the file handle is closed (original leaked it)
	with open(fn, 'r') as f:
		rows = [row for row in csv.reader(f) if len(row) > 0]
	return array(rows, dtype=float)
#


if __name__=='__main__':
	# command-line driver: load all inputs from disk, build the Genealogy,
	# compute the likelihood and print it
	usageString = """may 27 2011
	calculates log likelihood given various inputs stored on disk
	
	To use likhelper module, make sure to set: 
	export LD_LIBRARY_PATH=/usr/local/lib
	
	Usage:  python likelihoodC.py nwkFN sampleTimesFN sampleStatesFN taxisFN prevalenceFN birthsFN migrationFN
	python likelihoodC.py odesir_coalescentTest_tree.nwk odesir_coalescentTest_sampleTimes.csv odesir_coalescentTest_sampleStates.csv odesir_coalescentTest_time.csv odesir_coalescentTest_prev.csv odesir_coalescentTest_births.csv odesir_coalescentTest_migration.csv
	
	# for likelihood:  taxis, prevalence, births, migrations, genealogy
	# for genealogy:  nwkstring, sampleTimes (dict), sampleStates (dict)
	
	sampleTimes # file format should be csv nX2, (taxon id, value (float))
	sampleStates # file format should be csv nX(m+1), (taxon id, probability of state)
	
	taxis should be a single column type float
	prevalence should be csv with dimensions TXm, where T is the number of time points in taxis.
	births and migrations with m states should be csv table with dimensions (m*T)Xm , 
		where T is the number of time points.
		Optionally each of the T matrices can be separated by a blank line.
	
	"""
	try:
		# unpack the seven expected file names from the command line
		nwkFN, sampleTimesFN, sampleStatesFN, taxisFN, prevalenceFN, birthsFN, migrationFN  = sys.argv[1:8]
		# genealogy
		nwk = open(nwkFN, 'r').read()
		# file format should be nX2, (taxon id, value (float))
		sampleTimes = dict( [ (row[0], float(row[1])) for row in csv.reader(open(sampleTimesFN, 'r'))  ]  )
		# file format should be nX(m+1), (taxon id, probability of state)
		sampleStates = dict( [ (row[0], array(row[1:],dtype=float) ) for row in csv.reader(open(sampleStatesFN, 'r'))  ]  )
		
		births = load_births(birthsFN)
		migrations = load_migrations(migrationFN)
		prevalence = load_prevalence(prevalenceFN)
		taxis = load_taxis(taxisFN)
		
		
		m = len(births[0])
		if m>1 and len(sampleStates.values()[0])==1:
			#then the taxon states are probably a point mass
			# expand scalar state labels into one-hot probability vectors
			for k,v in sampleStates.items():
				s = zeros(m)
				s[int(v)] = 1.
				sampleStates[k] = copy(s)
		#
		
		
	except:
		# any failure above (bad arguments, unreadable files) prints the
		# usage message before re-raising the original exception
		print usageString
		raise
	
	
	
	genealogy = Genealogy(nwk, sampleTimes, sampleStates)
	
	#~ print 'start likelihood calc', time.ctime()
	l= Likelihood(taxis, prevalence, births, migrations, genealogy, rescaleTreeHeight=False)
	#~ print 'completed likelihood calc', time.ctime()
	
	print l.log_likelihood
#
