#!/usr/bin/python

import re
import objects as obj
#import time
#from PyQt4 import QtCore
from math import *
import numpy as np
import copy 

#HELPING FUNCTIONS
"""processing data format during parsing"""
def timeformat(tim):
    """Convert a raw time token to a float by dropping its two leading and
    one trailing characters (presumably a PT...S duration wrapper -- TODO
    confirm against the parser that produces these tokens).
    """
    return float(tim[2:-1])


def intensityformat(inten):
    """Expand a scientific-notation intensity string into a plain digit string.

    For input like "1.2e+03" the mantissa digits are shifted left by the
    exponent: fraction digits beyond the exponent are truncated, missing
    ones are zero-padded, and the result is returned as a *string*.
    Inputs without an "e" are returned as a float.

    NOTE(review): the mixed str/float return type is kept for backward
    compatibility with existing callers; negative exponents were not
    supported before and are not supported now.
    """
    if "e" in inten:
        mantissa, exponent = inten.split("e")
        # skip leading '+' signs and zeros of the exponent, exactly as the
        # original character-by-character scan did
        shift = int(exponent.lstrip("+0"))
        whole, frac = mantissa.split(".")
        if len(frac) > shift:
            # bug fix: the old loop compared the index against the *string*
            # exponent ("while j < number_zero"), which never terminated
            # correctly; take the first `shift` fraction digits instead
            return whole + frac[:shift]
        # pad with zeros up to the exponent width
        return whole + frac + "0" * (shift - len(frac))
    return float(inten)

def sort(self, l, g, d):
    """In-place quicksort of self[g:d+1], ordering items by their ``.x``
    attribute (the precursor value).

    self -- indexable, mutable sequence of objects exposing ``.x``
    l    -- unused; kept for interface compatibility with existing callers
    g, d -- inclusive left/right bounds of the slice to sort
    """
    # bug fix: the pivot previously read ".x" off a fresh one-element list
    # ("[((g+d)//2)].x") instead of indexing the sequence being sorted
    pivot = self[(g + d) // 2].x
    i, j = g, d
    while True:
        # Hoare partition: advance both cursors toward the middle
        while self[i].x < pivot:
            i += 1
        while self[j].x > pivot:
            j -= 1
        if i > j:
            break
        if i < j:
            self[i], self[j] = self[j], self[i]
        i += 1
        j -= 1
    # bug fix: recursion used to call self._sort, which does not exist for
    # this free function; recurse through this function itself
    if g < j:
        sort(self, l, g, j)
    if i < d:
        sort(self, l, i, d)





"""Chromatogram Creations"""
"""Belongs to the MODEL"""

def Chromato_creation(scans):
	"""Group scans by their (precursor, fragment) transition and build one
	Chromatogram per transition.

	Each scan contributes exactly one Data2D(rt, intensity) point.
	Returns the chromatograms sorted by (precursor, fragment) key.
	"""
	clusters = {}
	for scan in scans:
		key = (scan.get_precursor(), scan.get_fragment())
		# bug fix: the first scan of a transition used to be inserted twice
		# (once at list creation, once by the unconditional append);
		# setdefault keeps exactly one point per scan
		clusters.setdefault(key, []).append(
			obj.Data2D(scan.get_rt(), scan.get_intensity()))

	chromato_list = []
	for key in sorted(clusters):
		chromato_list.append(obj.Chromatogram(obj.MS2_trans(key), clusters[key]))
	return chromato_list


def Spectra_creation(scans, min_time, max_time, time_window):
		"""Bucket scans into consecutive retention-time windows of width
		``time_window`` and build one Spectrum object per window.

		scans       -- iterable of scan objects, ordered by retention time
		min_time    -- start of the first window (string or float)
		max_time    -- overall end of the time axis (string or float)
		time_window -- window width, same unit as the retention times

		NOTE(review): the last_key initialisation reads time_windows[1] and
		[2], so at least three window boundaries must exist; a very large
		time_window would raise IndexError -- confirm callers guarantee this.
		"""
		time_windows=[];GroupedScan={};spectrumList=[]
		time=float(min_time)
		time_windows.append(time)
		
		# build the list of window boundaries [min_time, min_time + w, ...]
		while float(max_time) - time > time_window:
			time += time_window
			time_windows.append(time)
		# remainder of the time axis not covered by a full window (unused)
		lost_time=float(max_time) - time_windows[-1]
		
		#s=clock()
		real_scan=max_range=0
		last_key = (time_windows[1], time_windows[2]) # initialisation allow first iteration
		for scan in scans:
			#max_range=0;
			real_scan += 1
			"""scan classified in order of retention time"""
			# advance to the first boundary at/after this scan's retention
			# time; max_range is never reset, which relies on the scans
			# being sorted by retention time
			while time_windows[max_range] < scan.get_rt() and max_range < len(time_windows)-1:
					max_range += 1
			key=(time_windows[max_range-1], time_windows[max_range])
			#print key
			#if not key in GroupedScan.keys(): # TODO: membership test was too slow, to be reworked
			# NOTE(review): a window's list is (re)initialised whenever the
			# key differs from the previous scan's; if a window reappeared
			# non-contiguously its earlier points would be discarded --
			# safe only while scans stay sorted by retention time.
			if key != last_key:
					GroupedScan[key]=[]
			GroupedScan[key].append( obj.Data2D(scan.get_precursor(), scan.get_intensity())) #obj.Data2D change to add only required Fields
			last_key = tuple(key)
			
		# emit spectra in window order (iterkeys: Python-2-only API)
		for key in sorted( GroupedScan.iterkeys() ):
			spectr= obj.Spectrum(key[0], key[1], GroupedScan[key])
			#spectr.Fillpeaks()
			spectrumList.append(spectr)
		#print len(GroupedScan),"spectra found with a time cycle of",time_window,"seconds"
		del GroupedScan # clean memory 
		return spectrumList


#DEPRECATED
def apply_cut_off(lspl, value):
	"""DEPRECATED -- drop every mass peak whose intensity is not above ``value``.

	The first pass rebuilds each sample's spectra list, keeping only peaks
	whose intensity is strictly greater than ``value``.  The second,
	in-place pass is the historical alternative kept for reference; after
	the first pass it finds nothing left to delete.
	"""
	print ("Warning, leads to loose informations...")
	# approach 1: build a new, filtered list per sample
	for spl in lspl:
		kept = [masspeak for masspeak in spl.getSpectra()
				if masspeak.get_y() > value]
		spl.setSpectra(kept)
	# approach 2: in-place deletion on the same list
	for spl in lspl:
		index = 0
		while index < len(spl.getSpectra()):
			if spl.getSpectra()[index].get_y() < value:
				del spl.getSpectra()[index]
			else:
				# bug fix: the index was never advanced, looping forever
				# on the first peak that survived the cut-off
				index += 1

"""read peaks file, and add peaks in Sample object"""
""" Belong to the MODEL"""
## must read config file 
def creating_peakList (lspl):# to put to processing
	"""Read the "peaks" result file and attach a RawPeakList to each sample.

	The file is ';'-separated.  Column 13 holds the 1-based sample index,
	column 1 the transition m/z, columns 4-7 peak metrics with columns 5/6
	the retention-time bounds of the peak -- NOTE(review): column meanings
	inferred from the indexing below; confirm against the peaks writer.

	NOTE(review): ``lspl`` is indexed like a list but also provides
	SampleTrans()/getFiles(); it is presumably a Samples collection object
	rather than a plain list -- verify with the caller.
	"""
	peaklist = []
	with open( "peaks", 'r') as f: #self.transformed_xml +
		lines = f.readlines()
		# line 0 is skipped (header); `first` is the current sample index
		count_line =1;first = 1
		
		while count_line < len(lines):
			
			peaks = obj.RawPeakList()
			# consume all consecutive lines belonging to sample `first`
			while int (lines[count_line].split(';')[13].strip()) == first : #TODO: one while loop too many ^^
				res = lines[count_line].split(';')
				
				# only keep peaks whose transition exists in this sample
				if lspl[first-1].is_trans_in(float(res[1])):
					
					chromatogram = lspl.SampleTrans(lspl.getFiles()[first-1], float(res[1]))
					# retrieve points for plotting the peak and the r calculation
					data = chromatogram._getslice(float(res[5]), float(res[6]))

					peak = obj.chromatoPeak(obj.MS2_trans((res[1],0)), res[4],res[5],res[6], res[7], chromatogram,data) 
					peaks.add_peak(peak)
					# the peak list is attached to the Sample object below
					
				count_line += 1
				if count_line == len(lines):
					break
			lspl[first-1].set_peak_list (peaks)
			peaklist.append(peaks)
			first += 1
			
	for spl in lspl:
		print "len peaks", len(spl.get_peak_list())
	return peaklist

"""
TODO: think about it
"""
def Gaussian_filter(points):
    """Placeholder for a future Gaussian smoothing of ``points``.

    Currently does nothing and returns None (still TODO, see note above).
    """
    pass
	   
#return points
def sort_by_max_mass(lst, g, d):
	"""In-place quicksort of lst[g:d+1], ordering items by the value
	returned from their ``getMaxMass()`` method.

	lst  -- mutable sequence of objects exposing getMaxMass()
	g, d -- inclusive left/right bounds of the slice to sort
	"""
	# bug fix: the pivot previously read ".x" off a throwaway one-element
	# list; use the middle element's max mass, consistent with the
	# comparisons below
	pivot = lst[(g + d) // 2].getMaxMass()
	i, j = g, d
	while True:
		# Hoare partition: advance both cursors toward the middle
		while lst[i].getMaxMass() < pivot:
			i += 1
		while lst[j].getMaxMass() > pivot:
			j -= 1
		if i > j:
			break
		if i < j:
			lst[i], lst[j] = lst[j], lst[i]
		i += 1
		j -= 1
	# bug fix: the recursive calls dropped the list argument
	if g < j:
		sort_by_max_mass(lst, g, j)
	if i < d:
		sort_by_max_mass(lst, i, d)
		

"""MERGING SAMPLES"""
def Merging_Spectra (lspl, error = 0.02):
	"""Merge the spectra of several samples into a single spectrum list.

	A deep copy of the sample with the lowest first-chromatogram precursor
	serves as the reference; every other sample's data points (m/z and
	intensity) are appended window-by-window onto the reference spectra.
	``error`` is kept for interface compatibility and is unused here.
	"""
	def first_precursor(sample):
		# sort key: precursor of the sample's first chromatogram transition
		return sample.getChromatogram()[0].get_trans().get_prec()

	reference = copy.deepcopy(min(lspl, key=first_precursor))
	for sample in lspl:
		if sample.getXmlFile() == reference.getXmlFile():
			continue
		for target, source in zip(reference.getSpectra(), sample.getSpectra()):
			points = source.get_points()
			target.mass.extend(p.get_x() for p in points)
			target.intens.extend(p.get_y() for p in points)
	return reference.getSpectra()
		
	
	
def Merging_Peaks(lspl):
	"""Concatenate the peak list of every sample into one RawPeakList."""
	merged = obj.RawPeakList()
	for sample in lspl:
		merged.extend(sample.get_peak_list())
	return merged
		

def Merging_Chromatograms(lspl):
	"""Return one flat list holding every sample's chromatograms, in
	sample order."""
	return [chromato
			for sample in lspl
			for chromato in sample.getChromatogram()]

def Chrom_creation_by_spectr(spectra):
	"""Rebuild chromatograms from spectra: cluster data points by m/z and
	collect one (spectrum t_max, intensity) point per occurrence.

	Returns a list of Chromatogram objects, one per distinct m/z; the
	fragment of the transition is unknown here, hence "NA".
	"""
	clusters = {}
	for spectr in spectra:
		for point in spectr:
			# bug fix: the first point of each m/z used to be dropped (the
			# dict entry was created empty and only *later* occurrences
			# were appended); setdefault keeps every point
			clusters.setdefault(point.get_x(), []).append(
				obj.Data2D(spectr.t_max, point.get_y()))
	chromatograms = []
	for mass, points in clusters.items():
		# bug fix: MS2_trans was referenced without its module prefix,
		# raising NameError at runtime (cf. obj.MS2_trans elsewhere)
		chromatograms.append(obj.Chromatogram(obj.MS2_trans((mass, "NA")), points))
	return chromatograms





#################################################################################
#									FIEHN

"""
formula as a dict {C:6 ....}
abundoncy as a dict {C: [0.98...,...]} determined by the reading file config element
"""
def PatternCalculation (formula):
	"""Compute the isotopic pattern of ``formula``.

	formula -- dict mapping element objects (each exposing get_abundoncy(),
	           a vector of isotope abundances) to their atom count.

	The pattern is the repeated convolution of each element's abundance
	vector, starting from the neutral pattern [1.0]; returned as a list.
	"""
	massif = np.array([1.])
	# compat fix: iterkeys()/xrange() are Python-2-only; keys()/range()
	# behave identically and also run under Python 3
	for element in formula.keys():
		for _ in range(formula[element]):
			massif = np.convolve(element.get_abundoncy(), massif)
	return list(massif)
				
		

#see canvasGL
def Normalization (points):
    """Placeholder: intensity normalisation is handled in canvasGL (see
    comment above); does nothing and returns None."""
    pass

def filtering_on_ratios(f, ratios=None):
		"""Return True when every element/carbon ratio of formula ``f`` is
		strictly below its threshold.

		f      -- formula object exposing get_number_of(element)
		ratios -- dict with *_threshold keys; defaults to all-zero
		          thresholds (which always yields False, as before)
		"""
		# avoid the shared mutable default dict of the original signature
		if ratios is None:
				ratios = {"OC_threshold": 0, "HC_threshold": 0, "PC_threshold": 0,
				          "NC_threshold": 0, "SC_threshold": 0}
		carbons = float(f.get_number_of("C"))
		# bug fix: these were bound as *_ratios but tested as *_ratio,
		# raising NameError on every call
		oc_ratio = f.get_number_of("O") / carbons
		hc_ratio = f.get_number_of("H") / carbons
		pc_ratio = f.get_number_of("P") / carbons
		nc_ratio = f.get_number_of("N") / carbons
		sc_ratio = f.get_number_of("S") / carbons
		return (oc_ratio < ratios["OC_threshold"] and hc_ratio < ratios["HC_threshold"]
				and pc_ratio < ratios["PC_threshold"] and nc_ratio < ratios["NC_threshold"]
				and sc_ratio < ratios["SC_threshold"])


def idms_filter(N, formula):
	"""Return True when ``formula`` contains exactly N carbon atoms.

	N       -- number of predicted carbons (IDMS filter)
	formula -- obj.Formula instance

	Raises TypeError for non-Formula input.
	"""
	if not isinstance(formula, obj.Formula):
		# bug fix: the old three-expression raise passed type(formula) as
		# a *traceback* object, which itself raised TypeError at raise time
		raise TypeError("formula must be a Formula object, got %s" % type(formula))
	return formula.get_number_of("C") == N
	
	

def apply_score (formula, cluster, error_massif):
	"""Score ``formula`` against the measured isotopic cluster.

	error_massif -- error as a percentage of the maximum peak.

	NOTE(review): unfinished -- the normalisation and the actual scoring
	are still TODO; the function currently returns None.
	"""
	form_to_test = formula.pattern_generation()
	form = cluster.get_isotopic_value()

	# TODO normalize form/form_to_test and compute the score
	# bug fix: assigning to a local named ``max`` shadowed the builtin
	# before its use on the same line (UnboundLocalError); use a new name
	max_peak = max(form)
	
	
if __name__=="__main__":pass
	
