import objects as obj
import time
from math import *
import numpy as np




def value_elimination(A, B):
	"""
	Trim A in place until it has the same length as B, so that a Pearson
	coefficient can be computed on two vectors of equal dimension.

	Values of A lying outside the band [mean - var, mean + var]
	(outliers) are removed first; if A is still longer than B, trailing
	values are dropped.  B is never modified; callers must pass the
	longer list as A.  Returns None (in-place mutation).

	Note: the spread used is the variance of A (not the standard
	deviation), matching the original docstring.
	"""
	if len(A) > len(B):
		var = np.array(A).var()
		# float() guards against Python 2 integer division on int lists
		mean = sum(A) / float(len(A))
		index = 0
		while len(A) > len(B) and index < len(A):
			# outlier test: outside [mean - var, mean + var]
			# (the original tested '> mean - var', which matched nearly
			# every value, and advanced 'index' even after a pop,
			# skipping the element shifted into place)
			if A[index] > mean + var or A[index] < mean - var:
				A.pop(index)
			else:
				index += 1
		# if A is still longer than B, remove trailing values until both
		# vectors have the same dimension
		while len(A) > len(B):
			A.pop()

def r_coef(la, lb):
	"""
	Pearson correlation coefficient between two value lists (numpy).

	Returns 0. when either list is empty.  When the lists differ in
	length, the longer one is trimmed in place by value_elimination()
	before the coefficient is computed.
	"""
	if not la or not lb:
		return 0.

	if len(la) != len(lb):
		# always hand the longer vector to value_elimination first
		longer, shorter = (la, lb) if len(la) > len(lb) else (lb, la)
		value_elimination(longer, shorter)

	return np.corrcoef(la, lb)[1][0]
	
	


# matrix correlation calcul over 2 samples ?
def calc_correlation_matrix(pkl1, pkl2, threshold=0.75):
	"""
	Build the full correlation matrix between two peak lists.

	matrix[i][j] is the Pearson coefficient (see r_coef) between the
	data of pkl1[i] and pkl2[j].

	NOTE(review): 'threshold' is currently unused -- kept for interface
	compatibility.
	"""
	matrix = []
	for i, peak1 in enumerate(pkl1):
		matrix.append([])
		for peak2 in pkl2:
			# the original called the undefined name
			# 'r_calculation_point'; r_coef is the correlation helper
			# defined in this module
			matrix[i].append(r_coef(peak1.get_data(), peak2.get_data()))
	return matrix

"""
[Loss of Sulfate]	1	1	-79.9568	0
[2 x Dechlorination]	1	1	-69.9377	0
[2 x (Dechlorination+H)]	1	1	-67.922	0
[Loss of CH3COOH]	1	1	-60.0211	0
[Loss of acetate]	1	1	-59.0133	0
[Loss of EtOH]	1	1	-46.0419	0
[Loss of HCOOH]	1	1	-46.0055	0
[Loss of formiate]	1	1	-44.9977	0
[Loss of CO2]	1	1	-43.9898	0
[Dechlorination]	1	1	-34.9689	0
[Dechlorination+H]	1	1	-33.961	0
[Loss of MeOH]	1	1	-32.0262	0
[Loss of CO]	1	1	-27.9949	0
[LossH20]	1	1	-18.0106	0
[Defluorination+H]	1	1	-17.9906	0
[Loss NH3]	1	1	-17.0265	0
[Loss of O]	1	1	-15.9949	0
[Demethylation]	1	1	-14.0157	0
[IDEM]	1	1	0	0
[C13]	1	1	1.0034	0
[Adduct Formiate]	1	1	46.0055	0
[Adduct acetate]	1	1	60.0211	0
[sulfate - Sulfate conjugation]	1	1	79.9568	0
"""

######CLUSTERING ALGORITHM######
def Clusterization(spl, peak_threshold= 0, mass_to_check=[-18., -44.,-80., -70., -68., -60., -59., -46., -45., -35., -32., -28., -17., -16., -14.], \
					error_rt =6, massif_length =6,suggested_peak_in =False, idms= False, idms_length=6, idms_spl=None ):
	"""
	Group the peaks of one sample into clusters (an M0 peak plus its
	adducts and isotopic massif), then store the resulting ListCluster
	on the sample and print a short summary.

	spl               -- sample object exposing get_peak_list()/getXmlFile()
	peak_threshold    -- minimum area for a peak to be clusterized
	mass_to_check     -- list of mass deltas to search as adducts
	                     (read from the config file); by default:
	                       loss -80 -P03-
	                       loss -18 -H2O
	                       loss -44 -CO2
	error_rt          -- retention-time tolerance window
	massif_length     -- maximum isotopic massif length to scan
	suggested_peak_in -- if True, also absorb the adducts of merged clusters
	idms, idms_spl    -- unused in the visible code (kept for interface
	                     compatibility)
	idms_length       -- maximum offset at which an IDMS peak is accepted

	NOTE(review): 'mass_to_check' is a mutable default argument -- safe
	only as long as no caller mutates it.
	"""
	
	peaks =spl.get_peak_list()
	if not isinstance(peaks, obj.RawPeakList):
		raise TypeError (" Object containing peaks have to be a RawPeakList")
		
	# Clusters     -> final result
	# tmp_Clusters -> candidates, filtered by the "useless cluster" pass below
	Clusters = obj.ListCluster(spl.getXmlFile())
	tmp_Clusters = obj.ListCluster(spl.getXmlFile())

	
	for i in xrange (len(peaks)):
		
		if peaks[i].get_area() > peak_threshold:
			
			# candidate cluster with peaks[i] as its M0
			grouping_peaks = obj.Clusters ( peaks[i])
			
			#find adducts
			for mass in mass_to_check:
				if spl.is_peak_in(peaks[i].get_trans_prec() + mass):
					p = peaks.Peak_at_Trans(peaks[i].get_trans_prec() + mass) 
					for pk in p:
						if abs(peaks[i].get_rt() - pk.get_rt() ) <= error_rt:
							#aggregation and elmination of the cluster
							#if the new adduct found is the owner of one cluster
							if  tmp_Clusters.cluster_at_M0(pk):
								clust, index = tmp_Clusters.cluster_at_M0(pk)
								if suggested_peak_in:
									# absorb the adducts of the merged cluster too
									for m in clust.get_adducts():
										if m not in grouping_peaks.get_adducts():
											grouping_peaks.add_adducts(m)
								grouping_peaks.add_adducts(clust.get_M0())
								
							#if the new adduct found belongs to another cluster's adducts
							elif tmp_Clusters.find_M0_adduct(pk):
								clust = tmp_Clusters.find_M0_adduct(pk)
								if suggested_peak_in:
									for tupl in clust:
										for p in tupl[0].get_adducts():
											if p not in grouping_peaks.get_adducts():
												grouping_peaks.add_adducts(p)
								grouping_peaks.add_adducts(clust.get_M0())
										
			#find isotopic cluster and idms in a same time
			j=1
			#initialize
			# M+1 candidates: same RT window and area smaller than M0
			p = peaks.Peak_at_Trans(peaks[i].get_trans_prec()+j)
			for pk in p:
				if abs(peaks[i].get_rt() - pk.get_rt()) <= error_rt and peaks[i].get_area() > pk.get_area():
					prev_peak = pk
					grouping_peaks.add_to_iso(prev_peak)
			j += 1
			#looking for idms and isotopic cluster
			# walk M+2, M+3, ... while areas keep decreasing; an area
			# increase within idms_length is interpreted as the IDMS peak
			while  spl.is_peak_in(peaks[i].get_trans_prec() + j) and j < massif_length+1:
				p = peaks.Peak_at_Trans(peaks[i].get_trans_prec()+j)
				next_peak =None
				for pk in p:
					if abs(peaks[i].get_rt() - pk.get_rt()) <= error_rt and peaks[i].get_area() > pk.get_area():
						next_peak = pk
						break
				if next_peak:
					if next_peak.get_area() < prev_peak.get_area():
						grouping_peaks.add_to_iso(next_peak)
					else:
						if j < idms_length:
							grouping_peaks.set_idms(next_peak)
							grouping_peaks.add_to_iso(next_peak)
				else:
					break
				prev_peak = next_peak
				j +=1 
			
			#put empty cluster into unclusterized list
			if not grouping_peaks.is_empty():
				tmp_Clusters.append(grouping_peaks)
			else:
				Clusters.get_unclust_peak().append(peaks[i])
				del grouping_peaks
		#not good threshold
		else:
			Clusters.get_unclust_peak().append(peaks[i])
	
	#elimination of unuseful clusters
	# a cluster whose M0 already appears as an adduct of another cluster
	# is dropped (it was merged above)
	for i, cl1 in enumerate(tmp_Clusters):
		is_useful = True
		for j, cl2 in enumerate(tmp_Clusters):
			if i !=j:
				#test if one M0 is an adduct 
				if cl1.get_M0() in cl2.get_adducts():# or cl1.get_M0() in cl2.get_isotopic_cluster():
					is_useful = False
		if is_useful:
			Clusters.append(cl1)

	del tmp_Clusters
	
	#test if there is conflict
	conflict_peaks = Clusters.is_in_conflict()
	
	spl.set_clusters(Clusters)	
		#resolve the conflicts ?

	# summary for the user: c counts identified peaks, p counts clusters
	print "peaks :", len(spl.get_peak_list())
	print "cluster :", len(Clusters)
	c =0.
	p =0.
	for cl in Clusters:
		"""
		print cl.get_M0(), cl.get_M0().get_rt(), 
		for ad in cl.get_adducts():
			print ad.get_M0(), ad.get_M0().get_rt()
		
		"""
		if not cl.is_empty():
			p+=1
			c+=1
			c+= cl.len_adduct()+ cl.len_isotopic_massif()
	print "identified peaks :", c
	print "found Cluster",p	
	print "conflict peaks", len(conflict_peaks)
	
	

def correlation_inter_samples(spl, lspl, error_rt, cor_threshold, flag):
	"""
	Validate each peak cluster of 'spl' by correlating its area profile
	with the matching peaks found in the other samples of 'lspl'.

	flag: "iso"  -> correlate isotopic-cluster areas
	      "frag" -> correlate fragment-cluster areas
	A cluster is marked valid when the mean correlation over all
	matching peaks is above cor_threshold; the mean is also stored via
	set_inter_corr().
	"""
	merged_peaks = obj.RawPeakList()  # all peaks of the OTHER samples
	for sple in lspl:
		if sple.getXmlFile() != spl.getXmlFile():
			merged_peaks.extend(sple.get_peak_list())

	# looking for peaks with same transition in the merged list
	for peak in spl.get_peak_list()._gen_peaks():
		identical_peaks = obj.RawPeakList()
		for elt in merged_peaks.peak_at_trans(peak.get_trans_prec()):
			if abs(elt.get_rt() - peak.get_rt()) < error_rt:
				identical_peaks.add_peak(elt)

		correlation = 0
		for elt in identical_peaks._gen_peaks():
			# build fresh lists with '+' -- the original used
			# list.append(), which returns None (so r_coef always
			# received None and returned 0.) and mutated the stored
			# area lists as a side effect on every call
			if flag == "iso":
				if len(elt.get_iso_areas()) >= 1:
					correlation += r_coef(
						peak.get_iso_areas() + [peak.get_area()],
						elt.get_iso_areas() + [elt.get_area()])
			elif flag == "frag":
				if len(elt.get_frag_areas()) >= 1:
					correlation += r_coef(
						peak.get_frag_areas() + [peak.get_area()],
						elt.get_frag_areas() + [elt.get_area()])

		# to avoid zero division !
		if identical_peaks:
			mean_corr = correlation / len(identical_peaks)
			if flag == "iso":
				peak.get_iso_cluster().set_valid(mean_corr > cor_threshold)
				peak.get_iso_cluster().set_inter_corr(mean_corr)
			elif flag == "frag":
				peak.get_frag_cluster().set_valid(mean_corr > cor_threshold)
				peak.get_frag_cluster().set_inter_corr(mean_corr)
	
	
####CAMERA ALGORITHM######

#class clusterization_CAMERA(QtCore.QObject):
def clusterization_CAMERA(spl, lspl, error_rt=6, massif_length =6, use_correlation=False, cor_threshold =0.75, idms_length = 6,adducts_to_check=[18., 44.,80., 70., 68., 60., 59., 46., 45., 35., 32., 28., 17., 16., 14.], mode ="negative"):
	"""
	CAMERA-style clusterization of one sample's peak list.

	Pipeline:
	  1. detect the isotopic cluster of every peak;
	  2. detect IDMS peaks just past each isotopic cluster;
	  3. group peaks by retention time and generate candidate M0 masses
	     from 'adducts_to_check';
	  4. map those candidate masses back onto the peak list;
	  5. optionally (use_correlation) validate the clusters by
	     inter-sample and intra-sample correlation.

	Returns (matching_peaks, final_list): matching_peaks is a
	RawPeakList of retained peaks, final_list the candidate-M0
	dictionaries (one per RT cluster).

	NOTE(review): 'mode' is unused in the visible code; mutable default
	argument 'adducts_to_check' is shared between calls.
	"""
	#def __init__(self, spl, lspl, error_rt=6, massif_length =6, use_correlation=False, cor_threshold =0.75, idms_length = 6,adducts_to_check=[18., 44.,80., 70., 68., 60., 59., 46., 45., 35., 32., 28., 17., 16., 14.], mode ="negative"):
	#	QtCore.QObject.__init__(self)
	#	self.spl = spl
	#	self.lspl =lspl
		
	# time.clock(): wall/CPU timer for the summary print at the end
	# (NOTE(review): removed in Python 3.8 -- fine under Python 2)
	t=time.clock()
	peaks =spl.get_peak_list()
	
	if not isinstance(peaks, obj.RawPeakList):
		raise TypeError, "must be a rawpeaklist object"
		
	#make two lists
	peaks_with_iso =obj.RawPeakList()
	peaks_without_iso= obj.RawPeakList()
	
	
	#1,first find the isotopic_cluster...
	print "Isotopic cluster calculation... "
	list_iso=obj.RawPeakList()
	for peak in peaks._gen_peaks():#for i, peak in enumerate(peaks):
		 
		if peak not in list_iso: #avoid to calculate for every peaks
			iso_cluster= obj.ClusterList()
			# scan M+1 .. M+massif_length for same-RT, smaller-area peaks
			for j in xrange (1,massif_length+1):
				p=peaks.peak_at_trans(peak.get_trans_prec()+j)
				pic= None
				for pk in p:
					if abs(peak.get_rt()-pk.get_rt()) <= error_rt and peak.get_area() > pk.get_area():
						pic =pk
						break
					#problem with m-1, m-2...TRUE
					elif abs(peak.get_rt()-pk.get_rt()) <= error_rt and peak.get_area() < pk.get_area():
						#pic=pk
						#peak.set_idms(pk)
						break
				if pic:
					iso_cluster.add_peak(pic)
					list_iso.add_peak(pic)
				else:
					# massif is contiguous: stop at the first gap
					break
			if iso_cluster:
				peak.set_iso_cluster(iso_cluster)
			#adding peak with isotopic_cluster in 
			#if peak.get_iso_cluster():
				peaks_with_iso.add_peak(peak)
			#else:
			#	peaks_without_iso.add_peak(peak)
		#self.emit(QtCore.SIGNAL("update_pb", (i/float(len(peaks)))*(20./100.)))	
	print "len peaks with iso", len(peaks_with_iso)		
	
	print "idms finder"
	#2,almost the same algorithm than isotopic cluster
	for peak in peaks_with_iso._gen_peaks():#for i, peak in enumerate(peaks_with_iso):
		#peak have to have an isotopic cluster
		#last_iso = peaks.get_iso_cluster()[-1]
		if idms_length > len(peak.get_iso_cluster()):
			# NOTE(review): the xrange upper bound subtracts
			# len(iso_cluster) from idms_length+1, so the range is often
			# empty -- confirm this is the intended stop condition
			for j in xrange(len(peak.get_iso_cluster())+1, idms_length+1 - len(peak.get_iso_cluster())):
				#print "j",j, "len iso_cluster", len(peak.get_iso_cluster())
				p = peaks.peak_at_trans(peak.get_trans_prec()+j)
				for pk in p:
					if abs(peak.get_rt()-pk.get_rt()) <= error_rt :#and pk.get_area() > last_iso.get_area():
						if abs(peak.get_trans_prec() - pk.get_trans_prec()) <=2:
							print "hola"
							print "Warning low idms value found", abs(peak.get_trans_prec() - pk.get_trans_prec()), "Carbon(s)"
						peak.set_idms(pk)
						break
				#go out for this peak
				break
				
	print "finished!"
	
	
	################################
	#START CAMERA ALGORITHM
	print "Creating possible M0..."
	RT_peak =[]
	#3,find for each peak peaks which matches with retention time
	for i,peak in enumerate(peaks_with_iso):
		l =obj.RawPeakList()
		l.add_peak(peak)
		for j, peak2 in enumerate(peaks_with_iso):
			if i!=j:
				if abs(peak.get_rt()- peak2.get_rt()) < error_rt:
					l.add_peak(peak2)
		#RT_peak.append(l)
		# keep 'l' only if an equivalent RT cluster is not already stored
		if i == 0:
			RT_peak.append(l)
		else:
			is_included = False
			for clust_rt in RT_peak:
				if abs(peak.get_rt() - (sum(clust_rt.as_rt_list())/len(clust_rt))) <= error_rt:
					if len(l) <= len(clust_rt):
						# inclusion test of l already in rt ?
						if  l== clust_rt:#l.is_included_in(clust_rt)*****************
							is_included = True
							#RT_peak.append(l)
			if not is_included:
				RT_peak.append(l)
				"""
				else:
					if RT_peak[-1].is_included_in(l):
						RT_peak.pop()
					RT_peak.append(l)
				"""
			#else:
			#	RT_peak.append(l)
	
	list_result =[] #list containing dictionaries
	for lst in RT_peak:
		#one dictionarie contains as keys all peaks in one cluster and as value, calculated values
		mass_generated_per_peak ={}
		#iterate over each peak within one RT cluster
		for i, p in enumerate(lst._gen_peaks()):
			mass_generated_per_peak[p] =[]
			for mass in adducts_to_check:
				mass_generated_per_peak[p].append(p.get_trans_prec()+ mass)
			# NOTE(review): this append sits inside the peak loop, so the
			# same dict is appended once per peak of the cluster -- looks
			# like an indentation slip; confirm before changing
			list_result.append(mass_generated_per_peak)
			
	
	final_list=[]#holds the dicts of computed masses with their peaks, one dict per RT cluster
	for  i, M0_masses in enumerate(list_result):
		res={} #dict corresponding to one cluster
		for key, val in M0_masses.iteritems():
			for potential_M0 in val:
				#res[potential_M0]=[1]
				if potential_M0 not in res.keys():
					res[potential_M0]=obj.ClusterList()
					res[potential_M0].append(key)
				else:
					if key not in res[potential_M0]:
						res[potential_M0].append(key)
		final_list.append(res)
		
	
	##########################
	print "Mapping of calculated mass on peaklist..."
	#4,see if one matches with peak in the raw peaklist		
	matching_mass =obj.RawPeakList()#object containing peaks which exists in the peaklist
	for dic in final_list:
		for mass in dic.keys():
			# midpoint of the cluster's RT span as the reference RT
			min_rt = min(dic[mass].as_rt_list())
			max_rt =max(dic[mass].as_rt_list())
			moy = (min_rt + max_rt)/2.
			p = peaks_with_iso.peak_at_trans(mass)
			for peak in p:
				if abs(peak.get_rt()- moy) < error_rt:
					peak.set_frag_cluster (dic[mass])
					matching_mass.add_peak(peak)

	
	print "Remove unuseful elements..."
	#does not take into account mapping the same peak several times
	treated_mass =obj.RawPeakList()
	#treated =[] #helping list
	matching_peaks =obj.RawPeakList()
	#for mass, rt in zip(matching_mass.as_trans_list(), matching_mass.as_rt_list()):
	for peak in matching_mass._gen_peaks():
	#if (mass,rt) not in treated:
		if peak not in treated_mass:
			#p = matching_mass.peak(mass, rt)
			if peak.get_iso_cluster():
				matching_peaks.add_peak(peak)
			treated_mass.add_peak(peak)
			
	del treated_mass
	#del matching_mass
	print "finished"

	################################	
	#5,second filter, correlation on the isotopic cluster between samples
	if use_correlation:
		print "Calculating correlation between samples..."
		#correlation calculation on both frag and iso intensities
		#correlation_inter_samples(spl, lspl, fin, error_rt, cor_threshold,flag="iso")
		correlation_inter_samples(spl, lspl, error_rt, cor_threshold,flag="frag")
		
		#correlation on frag peak shape intra_sample
		for peak in matching_peaks._gen_peaks():# matching_mass
			corr=0;k=0
			for i, frag in enumerate(peak.get_frag_cluster()._gen_peaks()):
				corr+= r_coef(peak.get_Y_data(), frag.get_Y_data())
				k+=1
				for j, frag2 in enumerate(peak.get_frag_cluster()._gen_peaks()):
					if i!=j:
						k+=1
						corr+=r_coef(frag.get_Y_data(), frag2.get_Y_data())
			if k and corr/k <cor_threshold:
				peak.get_frag_cluster().set_valid(False)
			elif k and corr/k >cor_threshold:
				peak.get_frag_cluster().set_valid(True)
			# NOTE(review): this division sits outside the 'if k' guard --
			# ZeroDivisionError when the frag cluster is empty
			peak.get_frag_cluster().set_intra_corr(corr/k)
	
	print "Merging interesting peaks"
	for peak in peaks_with_iso._gen_peaks():
		if peak not in matching_peaks:
			matching_peaks.append(peak)
	print "finished..."
	print time.clock()-t
	return matching_peaks, final_list
	#final_peaklist =obj.RawPeakList()
	#final_peaklist += 	
	
#tree
def recursive_merging(peaklist, sroot):
	# NOTE(review): this function appears unfinished/dead code. It
	# references 'root', 'matching_peaks', 'add' and 'peak', none of
	# which are defined here (the parameters are 'peaklist' and
	# 'sroot'), the loop variable 'son' is shadowed by the inner loop,
	# and the recursive call passes a single argument. Calling it would
	# raise NameError. Left byte-identical pending clarification of the
	# intended DFS-style merge (see the pseudocode below).
	for son in root.get_frag_cluster():
		res = matching_peaks.peak(add.get_trans_prec(),add.get_rt())
		if res:
			if res.get_frag_cluster():
				for son in res.get_frag_cluster():
					if son not in peak.get_frag_cluster():
						peak.get_frag_cluster().append(son)
				recursive_merging(res)
			if not son.frag_cluster():return
"""				
DFS (graphe G, sommet s)
{
  Marquer(s);
  POUR CHAQUE element s_fils de Voisins(s) FAIRE
     SI NonMarque(s_fils) ALORS
       DFS(G,s_fils);
     FIN-SI
  FIN-POUR
}
"""
