"""
@author Erik M Volz
@date Aug 6 2011
@version 0.1.0

Simulate a coalescent tree conditional on a complex demographic history.
"""
__version__='0.1.0'


from pylab import *
import networkx, numpy, pdb, csv, time, copy
from scipy.stats.distributions import rv_discrete
from Bio import Phylo
from cStringIO import StringIO
import likelihoodC

class Simulator2:
	""" June 5, 2011
	Simulate a coalescent tree conditional on a complex demographic 
	history.
	
	Parameters
	----------
	taxis : array-like float
		length T time axis. 
	prevalence : array-like float
		Dimension mXT. Each row is the population size for each of m 
		states at each of T time points. 
	births and migrations : tuple
		Length T. Each element is array-like mXm type float.
	sampleTimes and sampleStates : list (float and 
		numpy.array(dtype=float) respectively.)
		Each element corresponds to a sampled taxon. 
		sampleStates are length m arrays with values summing to 1. 
	singleMRCA=True : will ensure that a single tree is returned. 
		Otherwise multiple trees may be returned if there is not a 
		single MRCA at the final iteration of the algorithm.
	
	Value
	-----
	nwkstring : string
		Newick format string representation of tree topology. 
		There may be multiple trees separated by ';' if all lineages
		have not coalesced prior to taxis[0]. 
	nlft : array
		The number of lineages as a function of time. 
		Each row is (height, # of lineages). 
	
	
	While the simulator assumes that taxon states are known with 
	certainty, the sampleStates variables must describe the probability
	that the taxon is in each state for compatibility with likelihood 
	functions. 
	"""
	def __init__(self, taxis, prevalence, births, migrations, sampleTimes, sampleStates, singleMRCA = False):
		#~ sampleTimes and sampleStates are type list (float and integer resp.), dimension n
		# The simulation runs backwards in time ("height" = time before the most
		# recent sample), adding sampled lineages as their sample heights are
		# reached and drawing coalescent/migration events from exponential
		# waiting times whose rates come from the demographic trajectories.
		# The tree topology is encoded directly in the lineage names: each
		# internal branch is named by the Newick string of its clade, so the
		# root's name IS the tree.
		n = self.n = len(sampleStates)
		m = self.m = len(births[0][0])
		nlft = self.nlft = [(0.,n)] # will contain n-1X2 array of number lineages as a function of time
		
		# convert aggregated births and migrations to rates
		# (births[i] / migrations[i] are event counts accumulated over the
		# interval taxis[i-1]..taxis[i]; dividing by the interval length
		# converts them to per-unit-time rate matrices)
		n_births = [zeros((m,m))]
		n_migrations = [zeros((m,m))]
		for i in range(1,len(taxis)):
			interval = taxis[i] - taxis[i-1]
			n_births.append(births[i] / interval)
			n_migrations.append(migrations[i] / interval)
		
		# reorganize variables for reverse time axis
		# Truncate the trajectories at the last sample time and reverse them,
		# so index 0 corresponds to height 0 (the most recent sample).
		max_sample_time = max(sampleTimes)
		r_t_i = argmin(abs(max_sample_time-taxis))
		r_t = taxis[:r_t_i]#.tolist()
		r_t = max(r_t) - r_t
		r_t= r_t[::-1]
		
		
		#~ height = concatenate(([-1.],height))
		r_prevalence = prevalence[:r_t_i][::-1]
		r_births = n_births[:r_t_i][::-1]
		r_migrations = n_migrations[:r_t_i][::-1]
		
		height = self.height = taxis[:r_t_i].tolist() + sampleTimes
		height = sort(unique(height))
		
		# Build the merged "heights" grid: the reversed model time grid plus any
		# sample heights that fall strictly inside a model interval, each paired
		# with the demographic matrices of the interval that contains it.
		rh_prevalence =  [zeros(self.m)]
		rh_births =  [zeros((self.m, self.m))]
		rh_migrations = [zeros((self.m, self.m))]
		r_sampleTimes = ( max_sample_time - array( sampleTimes ) )
		heights = [0.]
		for i, r0, r1 in zip( range(len(r_t)-1), r_t[:-1], r_t[1:] ):
			delta = r1 - r0	# NOTE(review): unused
			#~ heights.append(r0)
			lower_h = r0
			intermediate_heights = [r1] #list() #[r1]
			# sample heights strictly inside (r0, r1) get their own grid point
			for st in unique(r_sampleTimes):
				if st > r0 and st < r1:
					#~ heights.append(st)
					intermediate_heights.append(st)
			#
			for ih in sorted(intermediate_heights):
				heights.append(ih)
				try:
					rh_prevalence.append( r_prevalence[i])
					rh_births.append(r_births[i])
					rh_migrations.append(r_migrations[i])
				except:
					pdb.set_trace()
			#
		#
		
		#~ pdb.set_trace()
		
		
		# initialize tree with first branch
		genealogy_networkx = self.genealogy_networkx = networkx.DiGraph() #
		#~ likelihood.Genealogy__init__(self, nwkstring, sampleTimes, sampleStates):
		
		
		# lineagesSampledAt_dict maps sample height -> list of tip names sampled
		# at that height. NOTE(review): lookups later use exact float equality
		# between r_sampleTimes values and grid heights — assumes sample times
		# lie exactly on the constructed grid; confirm for heterochronous input.
		lower_h = -inf #0.
		nSampleTimes = len(unique(sampleTimes))
		lineagesSampledAt_dict = dict.fromkeys(r_sampleTimes, None)#[[]]*nSampleTimes )
		branch_terminal_time = dict() #.fromkeys(range(n))
		lineage_names = list()
		for i, st in enumerate(r_sampleTimes):
			l = repr(i) + '_'	# tip names are '<index>_'
			lineage_names.append(l)
			branch_terminal_time[l] = st
			try:
				lineagesSampledAt_dict[st].append(l)
			except:
				# first lineage at this height (value was the None placeholder)
				lineagesSampledAt_dict[st] = [l]
		#
		#~ extant_lineages = copy.deepcopy(lineagesSampledAt_dict[0.])
		# extant lineages are bucketed by their current (sampled) state k
		extant_lineages_of_type = dict()
		for k in range(m):
			extant_lineages_of_type[k] = list()
		#
		taxonState_dict = dict(zip(lineage_names, sampleStates))
		next_lineage_id = -1 # will count downwards for internal branches 
		A = 0	# number of extant lineages
		
		ctime_list = list() # for debugging
		
		
		def finished(h, heights, extantLineages):
			# Terminate once the height grid is exhausted, unless singleMRCA
			# demands continuing until only one lineage remains.
			if h < max(heights):
				return False
			elif h>= max(heights) and (len(extantLineages) > 1) and singleMRCA:
				return False
			else:
				return True
		
		# pik_dict maps lineage name -> length-m state probability vector
		pik_dict = dict()
		T = len(heights)
		h = heights[0]
		extantLineages = list()
		h_index = 0
		varzip = zip(range(T), heights, rh_prevalence, rh_migrations, rh_births)
		#~ for i, h, p, M, b in zip(range(T), heights, rh_prevalence, rh_migrations, rh_births):
		# main loop: one iteration per height-grid interval
		while not finished(h, heights, extantLineages):
			try:
				i, h, p, M, b = varzip[h_index]
				h_index += 1
				interval_duration = h - heights[i-1] # this interval should correspond to those of the rh_birth and rh_migration matrices
			except IndexError: #height > max(heights)
				#~ algo will continue if singleMRCA parameter is True
				#~ will re-use the coalescent rate that yielded the last coalescent interval
				#~ this strategy will work even in situations where the model predicts no "births" at the first time step
				#~ last interval = 1/lambda = 1 / ( {A+1 choose 2}/Ne )
				#~ Ne = last interval * {A+1 choose 2} # like the skyline
				#~ lambda = {A choose 2} / Ne
				#~  so set p = ones(m), and b[k,l]= 1/Ne 
				#~ 
				M = zeros((m,m), dtype=float)
				p = ones(m, dtype=float)
				internodeInterval = ctime_list[-1] - ctime_list[-2]
				Ne = internodeInterval * ( (A+1)*A / 2. )
				interval_duration = heights[-1] - heights[-2]
				h+= interval_duration #10 * Ne / (A*(A-1)/2.)#interval_duration
				b = interval_duration * ones((m,m), dtype=float) / Ne
				
				#~ set all lines to state 0
				for k in range(1,m):
					for l in extant_lineages_of_type[k]:
						pik_dict[l] = array( [1.] + [0]*(m-1) )
						extant_lineages_of_type[0].append(l)
					#
					extant_lineages_of_type[k] = list()
				#
				#~ pdb.set_trace()
				pass
			
			#print i,h, p, M, b, time.ctime() #debug
			#new lineages sampled at h
			if lineagesSampledAt_dict.has_key(lower_h):
				for l in unique(lineagesSampledAt_dict[lower_h]):
					#~ pik_dict[l] = zeros(self.m)
					#~ pik_dict[l][ taxonState_dict[l] ] = 1.
					pik_dict[l] = taxonState_dict[l]
					# tips are binned by their most probable state
					extant_lineages_of_type[ argmax(taxonState_dict[l]) ].append(l)
					branch_terminal_time[l] = lower_h
				#
				A = sum([len(el) for el in extant_lineages_of_type.values()]) 
				AA = array([ len(extant_lineages_of_type[k]) for k in range(m) ])
				nlft.append((h, A))
				# more lineages than individuals in some state: warn but proceed
				if sum(AA > p) > 0:
					i = argmax(AA - p)
					#~ raise likelihoodC.TreeIncompatibleWithModelException('Number of sampled lineages %i exceeds population size %i in state %i at tree height %1.2f' % (AA[i], p[i],i+1, lower_h))
					print 'Warning: Number of sampled lineages %i exceeds population size %i in state %i at tree height %1.2f' % (AA[i], p[i],i+1, lower_h)
					pass #
					
			#
			
			# sanity check: lineage names must be unique
			extantLineages = [el for el in flatten(extant_lineages_of_type.values())]
			if len(extantLineages)!=len(unique(extantLineages)):
				print extantLineages
				pdb.set_trace()
			
			
			
			#~ if h >= max(heights) and len(extantLineages) > 1 and singleMRCA:
				#~ pdb.set_trace()
			
			
			# do coalescents
			# Draw successive event times within the interval [lower_h, h);
			# each event is either a coalescent or a lineage state change.
			ctime = -inf#inf#-inf
			ctime_counter = 0 # for debugging
			
			while ctime < h and A > 1:
				ctime_counter+=1
				#if ctime_counter > n * nSampleTimes * 2:
				#	pdb.set_trace()
				
				A = sum([len(el) for el in extant_lineages_of_type.values()]) #len(extant_lineages)
				if A== 0:#initially there are no lineages; sampling is handled at end of loop
					break
				#
				
				
				
				#update state of lineages between lower_h and coalescent time or next interval
				# anc[k] = expected number of extant ancestors in state k
				anc = zeros(m)# number of ancestors
				for k in range(m):
					for l in extant_lineages_of_type[k]:
						anc+=pik_dict[l]
					#
				#
				
				#~ ctimes = eye(A) * inf
				# lambda_kl : coalescent rate between a lineage in state k and one in l
				# lambda2_kl: rate of a lineage moving from state k to state l
				#             (migration plus birth into an unsampled lineage)
				lambda_kl = zeros((self.m, self.m))
				lambda2_kl = zeros((self.m, self.m)) # for migration
				sumlambda = 0.
				
				for k in range(self.m):
					for ll in range(self.m):
						if p[k]==0 or p[ll]==0:
							continue
						if k==ll:
							lambda_kl[k,ll] =(b[k,ll]/p[k]/p[ll]) * anc[k] * (anc[ll]-1.)
						else:
							lambda_kl[k,ll] =  (b[k,ll]/p[k]/p[ll]) * anc[k] * anc[ll]
							lambda2_kl[k, ll] =  M[ll,k] * anc[k] / p[k]   + \
							   ((p[ll] - anc[ll]) / p[ll]) *  b[ll, k] * anc[k] / p[k] 
							if isnan(lambda2_kl[k, ll]):
								lambda2_kl[k, ll] = 0.
						#
						
				#
				
				sumlambda = sum(lambda_kl) + sum(lambda2_kl)
				
				try:
					ctime =  exponential(scale = 1./sumlambda) + lower_h
				except:
					# sumlambda == 0: no event possible in this interval
					ctime = inf
				
				if ctime >= h:
					break
				#
				ctime_list.append(ctime) #
				
				# coalescent or state change happens
				pr_coalescent_conditional_on_event = sum(lambda_kl) / sumlambda
				#pdb.set_trace()
				if rand() < pr_coalescent_conditional_on_event: # do coalescent
					# new lineage
					#~ cti =ctimes.index(ctime)
					# draw the flattened (k,l) state pair proportional to lambda_kl
					cti = rv_discrete(name = 'cti_rv', values = (arange(self.m**2,dtype=float), ravel(lambda_kl) / sum(lambda_kl)) ).rvs()
					kctime =  cti / self.m
					lctime = mod(cti, self.m)
					
					# pick two distinct lineages uniformly from the chosen states
					ictime = randint(0, anc[kctime])
					jctime = randint(0, anc[lctime])
					if kctime == lctime and ictime==jctime: 
						while ictime==jctime:
							jctime = randint(0, anc[lctime])
							if anc[lctime]==1:#shouldn't happen!
								pdb.set_trace()
					#
					try:
						l_i = extant_lineages_of_type[kctime][ictime] # index error here?
						l_j = extant_lineages_of_type[lctime][jctime]
					except:
						pdb.set_trace()
					
					# add new lineage
					# using this naming scheme, the name of each branch is a newick of the corresponding clade
					# the entire tree will be encoded in the name of the root
					branchLength_i = ctime - branch_terminal_time[l_i]
					branchLength_j = ctime - branch_terminal_time[l_j]
					if branchLength_i < 0:
						pdb.set_trace()
					if branchLength_j < 0:
						pdb.set_trace()
					#
					sli = str(l_i) + ':'+ str(branchLength_i)
					slj = str(l_j) + ':'+ str(branchLength_j)
					next_lineage_id = '('+sli+','+slj+')'
					
					# make new lineage pik
					# draw a single state for the ancestor from the merged
					# distribution, then collapse pik to a point mass on it
					pik1 = pik_dict[l_i]
					pik2 = pik_dict[l_j]
					pik_dict[next_lineage_id] = self._pik_of_new_branch(pik1, pik2, b, p)
					nkctime = rv_discrete(values = (range(m),pik_dict[next_lineage_id]), name = 'nkctime_rv').rvs()
					pik_dict[next_lineage_id] = zeros(m)
					pik_dict[next_lineage_id][nkctime] = 1.
					
					
					branch_terminal_time[next_lineage_id] = ctime
					genealogy_networkx.add_edge(next_lineage_id, l_i)
					genealogy_networkx.add_edge(next_lineage_id, l_j)
					if kctime==lctime:
						# pop the larger index first so the smaller stays valid
						i1 = max(ictime, jctime)
						i2 = min(ictime, jctime)
						extant_lineages_of_type[kctime].pop(i1)
						try:
							extant_lineages_of_type[kctime].pop(i2)
						except:
							raise
							#~ pdb.set_trace()
					else:
						extant_lineages_of_type[kctime].pop(ictime)
						extant_lineages_of_type[lctime].pop(jctime)
					#
					extant_lineages_of_type[nkctime].append(next_lineage_id)
					
					
					#print ctime_counter, h, ctime,  A, time.ctime() #DEBUG
					
					nlft.append((ctime, A) )
				else: # state change for a lineage
					try:
						cti = rv_discrete(name = 'cti_rv', values = (range(self.m**2), ravel(lambda2_kl) / sum(lambda2_kl)) ).rvs()
					except:
						pdb.set_trace()
					#
					kctime =  cti / self.m
					lctime = mod(cti, self.m)
					if kctime!=lctime:
						# move a random lineage from state k to state l
						ictime = randint(0, anc[kctime])
						#~ try:
							#~ l_i = extant_lineages_of_type[kctime][ictime]
						#~ except:
							#~ pdb.set_trace()
						#~ extant_lineages_of_type[kctime].pop(ictime)
						try:
							l_i = extant_lineages_of_type[kctime].pop(ictime)
						except:
							pdb.set_trace()
						extant_lineages_of_type[lctime].append(l_i)
						pik_dict[l_i]  = zeros(m)
						pik_dict[l_i][lctime] = 1.
				#
				
				lower_h = ctime
				
				# debug: 
				extantLineages = [el for el in flatten(extant_lineages_of_type.values())]
				if len(extantLineages)!=len(unique(extantLineages)):
					print extantLineages
					pdb.set_trace()
			#
			
			lower_h = h
		#
		
		#~ networkx.draw(genealogy_networkx)
		#~ show()
		nlft = array(nlft)
		# each surviving root lineage's name is the Newick string of its tree;
		# multiple roots => multiple ';'-separated trees
		self.newick = '' #extant_lineages[0]+';'
		extant_lineages = concatenate(extant_lineages_of_type.values() )
		for l in extant_lineages:
			self.newick += str(l)+';\n'
		#
		self.newick = self.nwkstring = self.newick[:-1]
		#print
		#print self.newick
		#print
		#~ pdb.set_trace()
	#
	
	def _pik_of_new_branch(self, pik1, pik2, b, p):
		"""Return the length-m state distribution of the ancestor of two
		coalescing lineages with state distributions pik1 and pik2, weighting
		each ancestral state k by the pairwise coalescent rates b[k,l]/(p[k]p[l]).
		Falls back to pik1 when all rates are zero."""
		m = self.m
		X = zeros((m,m))
		for k in range(m):
			for l in range(m):
				#~ if k!=l: # will make the large size approximation
				if p[k]==0 or p[l]==0:
					continue
				try:
					X[k,l] = (b[k,l]/p[k]/p[l]) * (pik1[k]*pik2[l] + pik1[l]*pik2[k])
				except:
					X[k,l] = 0.
		#
		rowsumX =  sum(X, axis = 1)  
		rowsumX = rowsumX.reshape( (self.m ))  
		sumX = sum(rowsumX)
		
		#~ pdb.set_trace()
		if sumX==0:
			return pik1
		#
		return rowsumX / sumX  #pik
	#
#
#
#~ import cProfile, pstats
#~ python -m cProfile coalescentSimulator.py
if __name__=='__main__':
	usageString = """
	python coalescentSimulator.py  <birth csv filename> <migration csv filename> <population size csv filename> <time axis filename> [-n <sample size>] [-o <output newick filename>] [--singleMRCA]  
	
	example: python coalescentSimulator.py  births_6.csv  migrations_6.csv populationSize_6.csv  time_6.csv -n 350 -o tree.nwk --singleMRCA
	
	options : 
	-o <filename> : destination of newick produced by simulator
	-o <sample size> : the number of samples for a homochronous sample size
	--singleMRCA : if present, simulator will continue beyond the first time point in the epidemic if there is not yet a single MRCA. 
	
	Command-line version does not support heterochronous samples, but this can be done by writing a python script. 
	"""
	try:
		birthsFN, migrationFN, prevalenceFN,taxisFN = sys.argv[1:5]
	except:
		# these files should exist for testing purposes
		birthsFN       = 'odesir_coalescentTest_births.csv'
		migrationFN    = 'odesir_coalescentTest_migration.csv'
		prevalenceFN   = 'odesir_coalescentTest_prev.csv'
		taxisFN        = 'odesir_coalescentTest_time.csv'
		
		print 'using default input files'
		print """
		birthsFN       = 'odesir_coalescentTest_births.csv'
		migrationFN    = 'odesir_coalescentTest_migration.csv'
		prevalenceFN   = 'odesir_coalescentTest_prev.csv'
		taxisFN        = 'odesir_coalescentTest_time.csv'
		"""
	
	tokens = sys.argv[5:]
	kwargs = dict()
	c_s = True
	try:
		try:
			i = tokens.index('-o')
			kwargs['-o'] = tokens[i+1]
		except ValueError:
			pass
		try:
			i = tokens.index('-n')
			kwargs['-n'] = tokens[i+1]
		except ValueError:
			pass
		if tokens.count('--singleMRCA') > 0:
			singleMRCA = True
		else:
			singleMRCA = False
	except:
		print usageString
		#~ raise
		c_s = False
	
	
	if kwargs.has_key('-n'):
		n = int(kwargs['-n'])
	else:
		n = 350 
	if kwargs.has_key('-o'):
		treeFN = kwargs['-o']
	else:
		treeFN = 'tree.nwk'
	
	# likelihood
	
	try:
		births      = likelihood.load_births(birthsFN)
		migrations  = likelihood.load_migrations(migrationFN)
		prevalence  = likelihood.load_prevalence(prevalenceFN)
		taxis       = likelihood.load_taxis(taxisFN)
		
		simulate=True
	except:
		print 'Error : input files not found'
		#~ raise
		c_s = False
	#
	
	
	m = len(prevalence[-1])
	sampleStates = ((prevalence[-1])/(sum(prevalence[-1])) * n).astype(int)
	k = n - sum(sampleStates)
	sampleStates[argmax(prevalence[-1])] += k
	sampleStates = concatenate([[i]*ss for i,ss in enumerate(sampleStates)]).tolist()
	# each element of sampleStates should be an array of length m:
	sampleStates = [ eye(m)[ss] for ss in sampleStates  ]
	sampleTimes = [max(taxis)]*n #homochronous sample
	#~ sampleStates = [0]*(n/2) + [1]*(n/2) # state variables should start at zero; they are used as an index
	
	
	
	if c_s:
		s = Simulator2( taxis, prevalence, births, migrations, sampleTimes, sampleStates, singleMRCA = singleMRCA)
		print s.newick
		
		# serialize
		#~ treeFN = 'tree_'+repr(m) + '.nwk'
		sampleStatesFN = 'sampleStates_' + repr(m) + '.csv'
		sampleTimesFN = 'sampleTimes_' + repr(m) + '.csv'
		
		treef = open(treeFN, 'w')
		treef.write(s.newick)
		treef.close()
		
		ssw = csv.writer(open(sampleStatesFN, 'w'))
		ssw.writerows( [  ['%i_' % i] + ss.tolist() for i,ss in enumerate(sampleStates)] )  
		
		stw = csv.writer(open(sampleTimesFN, 'w'))
		stw.writerows([  ('%i_' % i,st) for i,st in enumerate(sampleTimes) ])

