"""
@author Erik M Volz
@date Aug 16 2011
@version 0.1.02

Calculate the likelihood of a gene genealogy conditional on a demographic history. 
"""
__version__='0.1.02'

from pylab import *
import networkx, pdb, csv, time, sys, numpy
from Bio import Phylo
from cStringIO import StringIO
from scipy.integrate import odeint, ode
from scipy.stats import gmean,  hmean
from scipy.linalg import expm
from scipy.interpolate import InterpolatedUnivariateSpline


class Genealogy(networkx.DiGraph):
	"""
	Genealogy(nwkstring, sampleTimes, sampleStates)
	
	Class extending networkx.DiGraph
	Contains tree topology, node heights, and branch lengths.
	Initialize with taxon metadata, sample dates, and states of taxa when sampled.
	Each node has attribute height, which is relative to most recently sampled taxon. 
	
	Parameters
	----------
	nwk : string
		Newick format describing tree topology
	sampleTimes, sampleStates : dict
		keys are taxon names corresponding to values in nwk. sampleTimes
		have values type float with date of taxon. Date can be 
		relative to any time. Dates are transformed into heights 
		relative to the most recent sample. sampleStates have values 
		type array(float). 
	nwkstrings : list
		contains strings, each of which is a Newick format tree
	
	Methods
	-------
	extant_lineages_in_interval(h0, h1) : h0 and h1 are type float, h0 < h1, representing time in past relative to most recent sample. 
		Returns list of branches from Genealogy.
	"""
	# class-level defaults; number_of_taxa is incremented once per terminal node in __init__
	number_of_taxa =0
	NSTATES = None	# number of demes/states; set from the first sampleStates entry
	def __init__(self, nwkstring, sampleTimes, sampleStates):
		"""
		sampleTimes and sampleStates are dictionaries keyed by taxon names in the newick
		"""
		# dimension of the per-taxon state pmf (Python 2 idiom: dict.values() is a list)
		self.NSTATES = len(sampleStates.values()[0])
		self.taxa = list()
		
		# NOTE(review): passes self as the graph-data argument to DiGraph.__init__;
		# self is still an empty graph at this point so it appears harmless, but
		# the usual call is super(Genealogy, self).__init__() — verify intent.
		super(Genealogy, self).__init__(self)
		
		self.sampleTimes = sampleTimes
		self.sampleStates = sampleStates
		self.nwk = nwkstring
		
		#nwk > DiGraph with BioPython
		# The input may contain several ';'-terminated trees; parse each one.
		self.nwkstrings = nwkstrings = nwkstring.split(';')
		self.biopython_phylos = biopython_phylos= list()
		for nwk in nwkstrings:
			nwk = nwk.strip()
			
			if len(nwk)>0 and nwk!="\n":
				# Phylo.read requires the trailing ';' that split() removed
				if nwk[-1]!=';':
					nwk+=';'
				failed = True
				try:
					#~ pdb.set_trace()
					biopython_phylo = Phylo.read(StringIO(nwk), "newick")
					biopython_phylos.append(biopython_phylo)
					failed = False
				except: # the newick is empty or otherwise corrupt
					#~ pdb.set_trace()
					print nwk
					#~ pass
					# NOTE(review): debugging hook left in — drops into an
					# interactive pdb session on a corrupt newick string
					pdb.set_trace()
					
				if not failed:
					# copy edges into self, oriented parent -> child
					digraph = Phylo.to_networkx(biopython_phylo)
					for u,v in digraph.edges():
						# strip stray quote characters from taxon names
						if u.name is not None:
							u.name = u.name.strip('\'')
						if v.name is not None:
							v.name = v.name.strip('\'')
						#
						if u.is_parent_of(v):
							self.add_edge(u,v)
						else:
							self.add_edge(v,u)
					#
					#~ pdb.set_trace() #
				#~ self.add_edges_from(digraph.edges())
				
		# add heights, sample times , and states
		# height of a tip = time back from the most recent sample
		self.max_sample_time = max_sample_time = max(sampleTimes.values())
		for node in self.nodes():
			
			if node.is_terminal():
				self.number_of_taxa += 1
				self.taxa.append(node)
				node.sample_time = sampleTimes[node.name]
				node.state = sampleStates[node.name]
				assert len(node.state)==self.NSTATES
				node.height = max_sample_time - node.sample_time
		#
		# propagate heights tip-to-root: parent.height = child.height + branch length.
		# Iterate until a full pass makes no updates (heights flow up one level per pass).
		nupdates = 1
		while nupdates > 0:
			nupdates = 0
			for u,v in self.edges():
				if v.__dict__.has_key('height') and not u.__dict__.has_key('height'):
					nupdates+=1
					u.height = v.height + v.branch_length
		#
		
		# july 9 add root edges
		# attach an artificial 'root' clade of branch length 1 above each
		# in-degree-0 node, so the basal lineage also has a parent edge
		for node in self.nodes():
			if self.in_degree(node)==0:
				clades = [clade for clade in node]
				rootclade = Phylo.BaseTree.Clade( branch_length=1., name='root', clades= clades) #max_sample_time - node.height
				rootclade.height = node.height + 1 #max_sample_time #retrospectively, the beginning of the epidemic
				self.add_edge(rootclade, node)
		
		#~ pdb.set_trace()
	
	
	
	def extant_lineages_in_interval(self, t1, t2):
		# returns branch with any overlap with interval [t1, t2)
		# A branch (u,v) spans heights [v.height, u.height]; it overlaps when
		# either endpoint falls inside the interval, or the branch covers it.
		if t1 > t2:
			return list()
		#
		branches = list()
		for u,v in self.edges():
			if (u.height > t1 and u.height <= t2) or (v.height >= t1 and v.height < t2) or (u.height >= t2 and v.height <= t1):
				branches.append((u,v))
		#
		#~ pdb.set_trace()
		return branches
	#
#


class Likelihood(object):
	""" july 22 2011
	Likelihood_Discrete_v12(taxis, prevalence, births, migrations, genealogy)
	
	Computes the likelihood of a gene genealogy conditional on a demographic history.
	
	Parameters
	----------
	taxis : array of length T, type float
		Gives the time corresponding to each matrix in births and migrations.
		*Must* have equally spaced values. 
	births and migrations : length T tuple of mXm arrays
	prevalence : TXm array, each row giving the population size
	genealogy : Genealogy
		each branch has keys for a number of lists: 
			heights-- the time axis for other variables 
			pik-- each element is m-array; pmf for state of branch
	
	Value
	-----
	log_likelihood : float
	lik_terms : list of type float
		Gives the likelihood of observing each internode interval. 
	
	
	Methods
	-------
	F_G_Y(t) : t float, time
		returns tuple of (mXm array type float, mXm array type float, m array type float).
		F is matrix of birth rates, G is matrix of migration rates, Y is array of population sizes. 
		These values are interpolated (linear) from the sequence of matrices provided on initialization. 
	"""
	def F_G_Y(self, t):
		"""Return (F, G, Y) at height t: birth-rate matrix, migration-rate
		matrix, and per-deme population sizes, interpolated from the input
		time series supplied to __init__."""
		# t is a height (time before the most recent sample); convert back
		# to the calendar-time axis the splines were fit on
		tt = self.genealogy.max_sample_time - t
		Y = array([self.interp_prevalence[k](tt) for k in range(self.m)]).flatten()
		dF =zeros( (self.m, self.m))
		dG = zeros( (self.m, self.m) )
		# instantaneous rates = first derivative of the *cumulative* rate splines
		for k,l in self.nonZeroElements:
			try:
				dF[k,l] = self.interp_cum_births[(k,l)].derivatives(tt)[1]
			except: # the derivative is not defined 
				dF[k,l] = 0.
			try:
				dG[k,l] = self.interp_cum_migrations[(k,l)].derivatives(tt)[1]
			except:
				dG[k,l] = 0.
		#
		# abs() guards against small negative rates from spline oscillation
		return abs(dF), abs(dG), Y
	
	
	
	def _state_at_internal_node_and_likelihood(self, internalnode, childnodes, birth, prev, A, S):
		""" july 20 2011
		Combine the state pmfs of the two child branches of internalnode into
		the pmf of the ancestral lineage, and append the coalescent likelihood
		term for this node to self.lik_terms.  Returns the new pmf (m-array).
		"""
		g = self.genealogy; m = self.m
		
		c1,c2 = childnodes
		# last recorded state pmf of each child branch
		try:
			lastpik1 = g[internalnode][c1]['pik'][-1]
		except:
			pdb.set_trace()
		lastpik2 = g[internalnode][c2]['pik'][-1]
		
		# X[k,l]: rate that the two child lineages coalesce via a k-l transmission
		X = zeros((m,m))
		lambdai = 0.
		lambdaj =0.
		Lambda =0.
		for k in range(m):
			for l in range(m):
				if k!=l:
					X[k,l] = (birth[k,l]/prev[k]/prev[l]) * (lastpik1[k]*lastpik2[l] + lastpik1[l]*lastpik2[k])
					# NOTE(review): lambdai/lambdaj/Lambda are *reassigned* (not
					# accumulated) each iteration and never read afterwards —
					# they look vestigial; verify before removing.
					lambdai = (birth[k,l]/prev[k]/prev[l]) * (lastpik1[k]*A[l])
					lambdaj = (birth[k,l]/prev[k]/prev[l]) * (lastpik2[k]*A[l])
					Lambda = (birth[k,l]/prev[k]/prev[l]) * (A[k]*A[l])
				else:
					# same-deme coalescence: finite-size correction (Y_l - 1);
					# the except falls back to the uncorrected form (e.g. when
					# prev[l]==1 makes the denominator zero)
					try:
						X[k,l] = (birth[k,l]/prev[k]/(prev[l]-1)) * (lastpik1[k]*lastpik2[l] + lastpik1[l]*lastpik2[k])
						lambdai = (birth[k,l]/prev[k]/(prev[l]-1)) * (lastpik1[k]*(A[l]-1))
						lambdaj = (birth[k,l]/prev[k]/(prev[l]-1)) * (lastpik2[k]*(A[l]-1))
						Lambda = (birth[k,l]/prev[k]/(prev[l]-1)) * (A[k]*(A[l]-1))
					except:
						X[k,l] = (birth[k,l]/prev[k]/prev[l]) * (lastpik1[k]*lastpik2[l] + lastpik1[l]*lastpik2[k])
						lambdai = (birth[k,l]/prev[k]/prev[l]) * (lastpik1[k]*A[l])
						lambdaj = (birth[k,l]/prev[k]/prev[l]) * (lastpik2[k]*A[l])
						Lambda = (birth[k,l]/prev[k]/prev[l]) * (A[k]*A[l])
				#
		#
		rowsumX =  sum(X, axis = 1)  
		sumX = lambdaij = sum(rowsumX)
		
		# update likelihood
		# likelihood of this internode interval = survivor function at the
		# node times the total coalescent rate at the node
		lik_term11 =  S  * lambdaij 
		self.lik_terms.append(lik_term11)
		
		self.S_list.append(S)
		self.S = 1.		# reset survivor function for the next interval
		
		if sumX==0:
			return lastpik1		# degenerate case: keep the child's pmf
		newpik = rowsumX / sumX  #pik
		return newpik
	#
	
	
	def _initialize_new_branches(self, branches, birth, prev, A, S):
		""" 
		A and S correspond to the point in time when the new branch occurs (going backwards), eg an internal node of the tree
		"""
		g = self.genealogy; m = self.m
		for branch in branches:
			parent, child = branch
			# per-branch bookkeeping lists, appended to as integration proceeds
			g[parent][child]['s'] = [ 1.]
			g[parent][child]['S'] = [1.]
			g[parent][child]['heights'] = [child.height]
			if child.is_terminal():
				# tip: initial pmf is the observed sample state
				g[parent][child]['pik'] = list()
				try:
					g[parent][child]['pik'].append( array(child.state) )
				except:
					pdb.set_trace()
				
				self.p_alpha_k = None
			else:# child is internal node
				# coalescent event: derive the ancestral pmf from the two
				# daughter branches and record the likelihood term
				self.p_alpha_k = self._state_at_internal_node_and_likelihood(child, self.genealogy.successors(child), birth, prev, A, S)
				g[parent][child]['pik'] = [ self.p_alpha_k ]  
			#
		#
		
	#
	
	
	def __init__(self, taxis, prevalence, births, migrations, genealogy):
		"""July 20 2011
		Integrate over the genealogy's internode intervals, accumulating the
		log likelihood in self.log_likelihood and per-interval terms in
		self.lik_terms.
		"""
		self.log_likelihood = 0.
		m = self.m = len(prevalence[0])
		T = self.T = len(prevalence)
		n = self.n = genealogy.number_of_nodes()
		self.genealogy  = g = genealogy
		self.births     = copy(births)
		self.migrations = copy(migrations)
		self.prevalence = copy( prevalence)
		
		nlft_k = self.nlft_k = list()
		S_list = self.S_list = list()
		
		#~ determine which elements are nonzero
		# so the ODE loops can skip permanently-zero rate entries
		# NOTE(review): the comprehension variable m shadows the local m above
		# (Python 2 leaks it); harmless since self.m is used below, but fragile.
		birthsMigrations = [b + m for b,m in zip(births,migrations)]
		birthsMigrations =sum(birthsMigrations, axis=0)
		self.nonZeroElements = list()
		for k in range(len(birthsMigrations)):
			for l in range(len(birthsMigrations)):
				if birthsMigrations[k,l] > 0 or birthsMigrations[l,k]>0:
					self.nonZeroElements.append((k,l))
		#
		
		
		# for interpolation of births,migrations, population size: 
		# splines are fit to *cumulative* rates; F_G_Y differentiates them
		self.cum_births = [births[0]]
		self.cum_migrations = [migrations[0]]
		for b,m in zip(births[1:], migrations[1:]):
			self.cum_births.append( self.cum_births[-1] + b ) 
			self.cum_migrations.append( self.cum_migrations[-1] + m )
		#
		self.interp_cum_births = dict()
		self.interp_cum_migrations = dict()
		self.interp_prevalence = dict()
		for k in range(self.m):
			y = [p[k] for p in self.prevalence ]
			self.interp_prevalence[k] = InterpolatedUnivariateSpline(taxis, y)
			for l in range(self.m):
				b = [ bb[k,l] for bb in self.cum_births]
				m = [ mm[k,l] for mm in self.cum_migrations]
				self.interp_cum_births[(k,l)] = InterpolatedUnivariateSpline(taxis, b)
				self.interp_cum_migrations[(k,l)] = InterpolatedUnivariateSpline(taxis, m)
		
		
		# reorganize variables for reverse time axis
		r_t_i = argmin(abs(g.max_sample_time-taxis)) + 1
		#~ r_t = height = self.height = self.heights = g.max_sample_time - taxis[:r_t_i][::-1] 
		r_t = height = self.height = self.heights =  taxis[:r_t_i]
		# should always inlude zero aligned with g.max_sample_time
		
		deltat = self.deltat = height[1] - height[0]
		
		
		self.lik_terms = list()
		self.lik_terms11 = list()	# NOTE(review): never appended to below
		
		# event heights (coalescences and sample times) define the internode intervals
		coalescent_or_sample_times = [ (parent.height, child.height) for parent,child in g.edges() ] 
		coalescent_or_sample_times = unique( ravel(coalescent_or_sample_times) )  
		
		A = zeros(self.m)	# expected number of ancestral lineages per deme
		self.S = 1. 		# survivor function over the current internode interval
		
		mrca_reached = False
		
		# march backwards in time over consecutive event heights [h0, h1)
		for i, h0, h1 in zip(range(len(coalescent_or_sample_times)-1), coalescent_or_sample_times[:-1], coalescent_or_sample_times[1:]):
			branches = g.extant_lineages_in_interval(h0, h1)
			
			if  len(self.lik_terms)==g.number_of_nodes()-1 and len(branches)<=1: #
				mrca_reached = True
				break #reached MRCA
			#
			
			F0,G0,Y0 = self.F_G_Y(h0)
			
			
			# branches whose child endpoint sits exactly at h0 start here
			newbranches =  [branch for branch in branches if branch[1].height == h0] 
			self._initialize_new_branches(newbranches, F0, Y0, A, self.S)
			
			# update other branches at node; finite size corrections; 
			# this will only work if sample times do *not* coincide with internal node times
			if self.p_alpha_k is not None: 
				oldbranches = [branch for branch in branches if branch[1].height!=h0]
				for parent,child in oldbranches:
					for k in range(self.m):
						p_i = g[parent][child]['pik'][-1]
						try:
							pik= p_i[k] #
							
							second_term = sum([ self.p_alpha_k[l]* pik*Y0[l]/(Y0[l]-p_i[l])  for l in range(self.m) if l!=k])
						except:
							pdb.set_trace()
						# in-place pmf correction conditioning on the coalescence
						g[parent][child]['pik'][-1][k] = self.p_alpha_k[k]* pik*(Y0[k]-1.)/(Y0[k]-pik) + second_term
				#
			#
			
			# integrate lineage counts, transition probs, and survivor function over [h0, h1]
			status2 , A, self.S = self._update_branches(branches,  h0, h1)
			if self.n_lineages_exceeds_population_size:
				break
			if status2 < 0:
				break # less than two extant lineages or population size less than nlft
			#
			
			nlft_k.append((h1,A))
		#
		
		if self.n_lineages_exceeds_population_size:
			self.log_likelihood = -inf
		else:
			self.log_likelihood = self.log_likelihood11 = sum(log(self.lik_terms))
			# extrapolate with the last term if the walk ended before all
			# (number_of_taxa - 1) coalescent events were observed
			self.log_likelihood += log(self.lik_terms[-1]) * (g.number_of_taxa - 1 - len(self.lik_terms))
			#~ self.log_likelihood += mean(log(self.lik_terms)) * (g.number_of_taxa - 1 - len(self.lik_terms))
		#
		
	#
	
	def qdot(self, x, t): 
		"""
		version 11, only one survivor function for the internode interval
		ODE right-hand side for odeint.  State vector x packs, in order:
		A (m lineage counts), Q (mXm transition matrix, row-major), S (scalar
		survivor function).  Returns dx/dt of the same shape.
		"""
		F,G,Y = self.F_G_Y(t)
		
		# unpack the flat state vector
		A = x[:self.m]
		Q = x[self.m:(self.m+self.m**2)].reshape((self.m, self.m))
		S = x[-1]
		
		A = minimum(A,Y) # A cannot exceed Y
		
		fractionYnotA = [ max((Y[k] - A[k]) / Y[k],0.) for k in range(self.m)]
		
		#dA
		dx = zeros(len(x))
		for k,l in self.nonZeroElements:
			# migration in/out plus births into unsampled lineages
			dx[k] +=  A[l] * G[k,l] / Y[l] \
				  - A[k] * G[l,k]/Y[k] \
				  + (A[l] / Y[l]) * fractionYnotA[k]  *  F[k,l] \
				  - (A[k]/ Y[k])  * fractionYnotA[l]  *  F[l,k] 
		#
		
		#dQ
		# R is the generator matrix for deme transitions; lambdaS is the total
		# coalescent hazard driving the survivor function
		lambdaS = 0.
		R = zeros((self.m,self.m))
		for k,l in self.nonZeroElements:
			if k!=l:
				lambdaS += F[k,l] * (A[k]/Y[k]) * (A[l]/Y[l])
				#~ R[k,l] += (GoY[l,k] + YmAoY[l,k] )#
				R[k,l] += G[l,k] / Y[k] + F[l,k] * fractionYnotA[l]/ Y[k] 
			else:
				lambdaS += F[k,l] * (A[k]/Y[k]) * abs((A[l]-1)/(Y[l]-1))  
			#
		#
		for k in range(self.m):
			R[k,k] = - sum(R[k])	# rows of a generator sum to zero
		#
		for i in range(self.m):
			p_i = Q[i]
			dp_i = dot(p_i, R)
			indices = range(self.m + i * self.m, self.m + i * self.m + self.m)
			dx[indices] = dp_i
		#
		
		#dS
		dx[-1] = -S * lambdaS
		
		
		return dx
	#
	
	# NOTE(review): this flag is read in __init__ but never set True anywhere
	# visible in this file — the population-size check below looks unfinished.
	n_lineages_exceeds_population_size= False
	def _update_branches(self, branches, lower_h, upper_h):
		# updates state of branches at height upper_h
		# Returns (status, A1, S1): status 0 on success, -2 if no branches.
		g = self.genealogy; m = self.m
		
		self.nbranches = nbranches = len(branches)
		
		if nbranches < 1:
			return -2, 1, 1
		#
		
		# make initial conditions
		# A0: sum of branch pmfs = expected lineages per deme at lower_h
		A0 =zeros(self.m)
		for branch in branches:
			parent, child = branch
			pik = g[parent][child]['pik'][-1]
			try:
				A0+=pik
			except:
				pdb.set_trace()
		#
		Q0 = eye(self.m)
		S0 = self.S
		x0 = concatenate((A0, ravel(Q0), [S0] ))
		# NOTE(review): float num argument `2.` was accepted by the old numpy
		# this was written against; modern numpy raises TypeError here.
		t = linspace(lower_h, upper_h, 2.) #the upper bound on the time axis is arbitrary
		
		# check that number lineages does not exceed population size
		# NOTE(review): F, G, Y are computed but no check is actually performed
		F,G,Y = self.F_G_Y(lower_h)
		
		
		# solve ode's
		x1t = odeint(self.qdot, x0, t) #
		x1= x1t[-1]
		A1 = x1[:self.m]
		Q = x1[self.m:(self.m+self.m**2)].reshape((self.m, self.m))
		S1 = x1[-1]
		#~ s_Q = x1[(self.m+self.m**2):]
		
		# renormalization step to prevent numerical instabilities
		Q = abs(Q)
		rowsumQ = sum(Q, axis = 1)
		Q = Q / rowsumQ.reshape((len(Q),1))
		
		#update state of branches
		# propagate each branch pmf through the interval's transition matrix
		for i, branch in enumerate(branches):
			parent, child = branch
			oldpik = g[parent][child]['pik'][-1]
			newpik = dot( Q.T , oldpik)
			
			g[parent][child]['pik'].append(newpik)
			g[parent][child]['heights'].append(upper_h)
			
			g[parent][child]['s'].append(1)
			g[parent][child]['S'].append(S1)
		#
		
		return 0, A1, abs(S1)
#

def load_flatfile_TXmXm( fn):
	"""Read a csv flatfile from fn with dimensions (m*T)Xm.
	
	Optionally, matrices can be separated by blank lines (blank rows are
	dropped before reshaping).  m is inferred from the width of the first row.
	
	Returns a length-T list of mXm arrays of type float.
	"""
	# use a context manager so the file handle is always closed
	# (the original leaked it)
	with open(fn, 'r') as f:
		d = [row for row in csv.reader(f) if len(row) > 0]
	m = len(d[0])
	# explicit floor division: identical to the old `/` under Python 2 ints,
	# and correct if this file is ever run under Python 3
	return [array(d[i * m:(i + 1) * m], dtype=float) for i in range(len(d) // m)]
#

def load_births( fn):# ie transmissions
	"""Load the birth (transmission) rate series from disk.
	
	fn is a csv flatfile with dimensions (m*T)Xm; the T matrices may
	optionally be separated by blank lines.  Returns a length-T list of
	mXm float arrays.
	"""
	# births share the flatfile layout, so delegate to the generic loader
	return load_flatfile_TXmXm(fn)
#
def load_migrations( fn):# ie stage transitions
	"""Load the migration (stage transition) rate series from fn.
	
	Same (m*T)Xm csv layout as load_births; returns a length-T list of
	mXm float arrays.
	"""
	return load_flatfile_TXmXm(fn)
#
def load_taxis(fn):# single column of data, length T
	"""Load the time axis from fn: a single column of floats, length T."""
	# numpy handles the parsing; one value per line yields a 1-d array
	taxis_values = numpy.loadtxt(fn, delimiter = ',')
	return taxis_values
#
def load_prevalence( fn):
	"""Read the prevalence table from fn: csv with dimensions TXm, where
	each row gives the per-deme population sizes at one time point.
	
	Blank rows are skipped.  Returns a TXm float array.
	"""
	# context manager closes the handle (the original leaked it)
	with open(fn, 'r') as f:
		d = [row for row in csv.reader(f) if len(row) > 0]
	return array(d, dtype=float)
#


if __name__=='__main__':
	usageString = """may 27 2011
	calculates log likelihood given various inputs stored on disk
	
	Usage:  python likelihood.py nwkFN sampleTimesFN sampleStatesFN taxisFN prevalenceFN birthsFN migrationFN
	python likelihood.py odesir_coalescentTest_tree.nwk odesir_coalescentTest_sampleTimes.csv odesir_coalescentTest_sampleStates.csv odesir_coalescentTest_time.csv odesir_coalescentTest_prev.csv odesir_coalescentTest_births.csv odesir_coalescentTest_migration.csv
	
	# for likelihood:  taxis, prevalence, births, migrations, genealogy
	# for genealogy:  nwkstring, sampleTimes (dict), sampleStates (dict)
	
	sampleTimes # file format should be csv nX2, (taxon id, value (float))
	sampleStates # file format should be csv nX(m+1), (taxon id, probability of state)
	
	taxis should be a single column type float
	prevalence should be csv with dimensions TXm, where T is the number of time points in taxis.
	births and migrations with m states should be csv table with dimensions (m*T)Xm , 
		where T is the number of time points.
		Optionally each of the T matrices can be separated by a blank line.
	
	"""
	try:
		nwkFN, sampleTimesFN, sampleStatesFN, taxisFN, prevalenceFN, birthsFN, migrationFN  = sys.argv[1:]
		# genealogy
		nwk = open(nwkFN, 'r').read()
		# file format should be nX2, (taxon id, value (float))
		sampleTimes = dict( [ (row[0], float(row[1])) for row in csv.reader(open(sampleTimesFN, 'r'))  ]  )
		# file format should be nX(m+1), (taxon id, probability of state)
		sampleStates = dict( [ (row[0], array(row[1:],dtype=float) ) for row in csv.reader(open(sampleStatesFN, 'r'))  ]  )
		
		births = load_births(birthsFN)
		migrations = load_migrations(migrationFN)
		prevalence = load_prevalence(prevalenceFN)
		taxis = load_taxis(taxisFN)
		
		
		m = len(births[0])
		if m>1 and len(sampleStates.values()[0])==1:
			# taxon states must be point mass
			for k,v in sampleStates.items():
				s = zeros(m)
				s[int(v)] = 1.
				sampleStates[k] = copy(s)
		
	except:
		print usageString
	
	
	
	
	genealogy = Genealogy(nwk, sampleTimes, sampleStates)
	
	print 'start likelihood calc', time.ctime()
	l= Likelihood(taxis, prevalence, births, migrations, genealogy)
	print 'completed likelihood calc', time.ctime()
	
	print 'Deprecation Warning: This code is not being updated. Use likelihoodC.py'
	print l.log_likelihood
#
