import os
import sys
import shutil
import numpy
import datetime
import pickle
import math
import scipy
import scipy.stats
import scipy.signal
import scipy.optimize
import lmfit  ## NEW MODULE!!!
import pylab
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import dates as dates
from matplotlib.colors import LogNorm

ln2 = numpy.log(2)
import isotopes

class esc_run:
    """
    This handles a set of ESC files associated with an ESC run
    It opens all files associated with the run if you give it a zip file or directory
    It reads them in and populates this.data and this.idata fields
    It can try to figure out the spectral peak assignments and determine the energy calibration
    It can fit the spectral shape of the final spectrum
    It can determine the number of counts in each peak as a function of time
    """
    def __init__(self,run_path=None, verbose=False):
        self.verbose = False
        self.alpha_list = [] #['Po210','Po214','Po218','Po216','Po212']
        #parse arguments
        if verbose == True:
            print "Setting Verbose = True"
            self.verbose = True
        # If a run_path was set at init then just start opening files (why wait)
        if 'run_path' != None:
            self.run_path = run_path
            if self.run_path[-1] == '/': self.run_path=self.run_path[:-1]
            self.open_runfiles()
            #use the last spectrum as the sum spectra as a default
            self.sum_spec = self.data[-1]
        else:
            print "No run files specified."
        # Fill out constants derived from decay chains in isotopes table
        self.get_decay_chains()
        self.get_U_chain_matrix()
        #self.get_Th_chain_matrix()
    
    def set_run_path(self, path):
        """Set the path of the CHN data for this run;
        either directory containing CHN files, or the ZIP file"""
        # We prefer paths to not have the tail '/'
        if run_path[-1] == '/': run_path = run_path[:-1]
        self.run_path = run_path
        
    def open_runfiles(self):
        """
        Opens files by grabbing all the run_file binary data streams and 
        passes them to the readchn class, which returns the parsed data. 
        WIPP runs have YYMMDDE5_###.chn file names.
        SNOLab runs have YYMMDDE?.### file names.

        Populates, in sorted-filename (i.e. acquisition) order:
          self.data[i] / self.idata[i]           -- cumulative / per-interval counts
          self.livetime/realtime, self.i*time    -- cumulative / per-interval times
          self.istarttime/iendtime/datetime      -- interval edges and centers
          self.header[i]                         -- CHN header fields as a dict
        NOTE(review): `readchn` and `UnexpectedShit` are defined elsewhere in
        this module (not visible in this chunk).
        """
        if self.verbose: print "open_runfiles: RUN_PATH=",self.run_path
        self.files = {}
        # ZIP file Extraction
        # Assuming that we only have zip files that are same run/whatever...
        # WIPP does not ZIP files does it.... Maybe we should zip them....
        # I would like zips to be default.
        if self.run_path.split('.')[-1] in ['zip','ZIP']:
            self.run_file_type = 'zip'
            import zipfile
            zf = zipfile.ZipFile(self.run_path)
            for f in zf.filelist:
                # Spectrum files end in .chn/.CHN (WIPP) or an all-digit
                # interval-number extension (SNOLab)
                if f.filename.split('.')[-1].isdigit() or f.filename.split('.')[-1] in ['chn','CHN']:
                    self.files[f.filename] = readchn(zf.open(f,'r').read())
                if f.filename.split('.')[-1].lower() == 'log':
                    self.run_log = zf.open(f,'r').readlines()
        # Directory file Extraction
        else:
            # NOTE(review): missing `print` -- this verbose message is a no-op
            if self.verbose: "Opening files in dir: %s"%(self.run_path)
            for f in os.listdir(self.run_path):
                if f.split('.')[-1] in ['chn','CHN'] or f.split('.')[-1].isdigit():
                    self.files[f] = readchn(open(self.run_path +'/'+ f, 'r').read())
                elif f.split('.')[-1].lower() == 'log':
                    self.run_log = open(self.run_path +'/'+ f).readlines()
        # This is just a quick check that all the files have the same initial character 
        # set, i.e. YYMMDDE# is all the same
        for f in self.files.keys():
            if f[0:8] != self.files.keys()[0][:8]:
                print self.files.keys()[0][:8],' != ',f[0:8]
                raise UnexpectedShit("open_runfiles: Check your runfiles, found different runs mixed in")
        # Get MCA channel data so we know how big the count array needs to be
        self.nchan = self.files[self.files.keys()[0]].nchan
        # Number of intervals
        self.inum = len(self.files)
        # Make data arrays
        self.data = numpy.zeros((self.inum,self.nchan))
        self.idata = numpy.zeros((self.inum,self.nchan))
        # Make array for livetime and realtime data
        self.livetime = numpy.zeros(self.inum)
        self.ilivetime = numpy.zeros(self.inum)
        self.realtime = numpy.zeros(self.inum)
        self.irealtime = numpy.zeros(self.inum)
        self.elapsed_time_sec = numpy.zeros(self.inum) #I guess this will be the time since start to the bin center
        self.istarttime = []
        self.iendtime = []
        self.datetime = []
        self.header = []
        self.acqtimes = []
        # Get the data for the run files, this sort puts them in order of acquisition (hopefully)!!!
        for i,k in enumerate(sorted(self.files.keys())):
            if i==0:
                self.runstarttime = self.files[k].runstarttime
            if i>0 and self.runstarttime != self.files[k].runstarttime:
                print "Problem. Runs dont start at the same time!?!, spec=",i+1
            self.data[i] = self.files[k].data
            self.livetime[i] = self.files[k].livetime
            self.realtime[i] = self.files[k].realtime
            self.acqtimes.append(self.files[k].header[6]) # String
            self.header.append({'ftype':self.files[k].header[0],'mca':self.files[k].header[1],'segment':self.files[k].header[2],
                                'secs':self.files[k].header[3],'realtime':self.files[k].header[4],'livetime':self.files[k].header[5],
                                'acqtime':self.files[k].header[6],'channoffset':self.files[k].header[7],'nchan':self.files[k].header[8]})
        # Make array with only the interval accumulated counts now
        # (each file holds the running total, so interval i = file i - file i-1)
        for i in range(len(self.files)):
            if i==0:
                self.idata[i] = self.data[i]
                self.ilivetime[i] = self.livetime[i]
                self.irealtime[i] = self.realtime[i]
                #TODO: Check realtime>=livetime
                self.istarttime.append(self.runstarttime)
                self.datetime.append(self.runstarttime + datetime.timedelta(seconds=self.irealtime[i]/2.))
                self.iendtime.append(self.istarttime[0] + datetime.timedelta(seconds=self.irealtime[i]))
            else:
                self.idata[i] = self.data[i] - self.data[i-1]
                self.ilivetime[i] = self.livetime[i] - self.livetime[i-1]
                self.irealtime[i] = self.realtime[i] - self.realtime[i-1]
                #TODO: Check realtime>=livetime
                self.istarttime.append(self.runstarttime + datetime.timedelta(seconds=self.realtime[i-1]))
                self.iendtime.append(self.runstarttime + datetime.timedelta(seconds=self.realtime[i]))
                self.datetime.append(self.istarttime[i] + datetime.timedelta(seconds=self.irealtime[i]/2.))
            # Cumulative counts should never decrease; clamp any negative
            # interval counts to zero and warn
            if min(self.idata[i])<0:
                print "WARNING: Somehow got negative interval counts, this should not happen!!!"
                print " Spectrum %d"%i
                for j,d in enumerate(self.idata[i]):
                    if d < 0:
                        print " Channel %d"%j
                        self.idata[i][j] = 0
            self.elapsed_time_sec[i] = (self.datetime[i] - self.runstarttime).total_seconds()
        print "Sucessfully loaded %d spectrum into self.data"%len(self.data)
        if self.verbose>1:
            s = "Each spectrum is indexed as a list object, starting from 0.\n"
            s+= "A.data[i] = sum counts\n"
            s+= "A.idata[i] = interval counts\n"
            s+= "A.livetime[i] and A.realtime[i]\n"
            s+= "A.header[i] has all the header information as a dictionary, keys are:"
            print s
            print self.header[0].keys()
        
    def fitfun_gauss_lorentz(self,p):
        """
        Returns y=fn(x) for lorentz<x<gauss function given
          p[0] = Amplitude
          p[1] = Peak Center
          p[2] = Gaussian sigma
          p[3] = Lorentzian sigma
        """
        X = numpy.arange(0,self.sum_spec.size)
        Y = numpy.zeros(self.sum_spec.size)
        for i,x in enumerate(X):
            if x>p[1]:
                Y[i] = p[0] * math.exp(-0.5*((x-p[1])/p[2])**2.)
            else:
                Y[i] = p[0] * (p[3]/2.)**2./((x-p[1])**2.+(p[3]/2.)**2.)
            if Y[i] < 1.e-6: Y[i] = 0
        return Y
    
    def multi_peak_spec(self,p):
        """
        Returns the model spectrum for N peaks where the parameters for each peak is:
            A) A list p (len(p)%4=0) where each peak is specified by 4 parameters
            B) A lmfit.Parameters() class that has the parameters <alpha>_[p0,A0,gw,lw]
        """
        X = numpy.arange(0,self.sum_spec.size)
        Y = numpy.zeros(X.size)
        # Parameters spelled out in case A)
        if type(p) == type([]):
            if len(p)%4 != 0: raise ValueError('The number of parameters passed to multi_peak_spec was wrong')
            for i in range(len(p)/4):
                Y += self.fitfun_gauss_lorentz(p[i*4:i*4+4],X)
        # Parameters spelled out in case B)
        elif type(p) == type(lmfit.Parameters()):
            for alpha in self.alpha_list:
                if 'gw' in p.keys() and 'lw' in p.keys():
                    # Were using a single resolution minimization
                    peak_par = [p['%s_A0'%alpha].value, p['%s_p0'%alpha].value, p['gw'].value, p['lw'].value]
                else:
                    peak_par = [p['%s_A0'%alpha].value, p['%s_p0'%alpha].value, p['%s_gw'%alpha].value, p['%s_lw'%alpha].value]
                Y += self.fitfun_gauss_lorentz(peak_par)
        return Y
    
    def multi_peak_nll(self,p):
        """
        Returns the (extended, Poisson) negative log-likelihood of the
        multi-peak model vs the summed spectrum:
          NLL = sum( model - data * log(model) )
        with model = self.multi_peak_spec(p) and data = self.sum_spec.
        """
        # BUGFIX: `data` was an undefined name (NameError); the data being
        # fit is the summed spectrum in self.sum_spec. Also the result is
        # now summed to a scalar -- scipy.optimize.minimize requires a
        # scalar objective, so the old elementwise array could not be used.
        model = self.multi_peak_spec(p)
        # Channels where the model is exactly zero (clipped tails) would give
        # log(0) = -inf; exclude them so the optimizer sees finite values.
        good = model > 0
        return numpy.sum(model[good] - self.sum_spec[good] * numpy.log(model[good]))
        
    def initial_find_peaks(self,width=45):
        """
        Finds peaks in sum_spec with the SciPy (version >0.11) builtin find_peaks_cwt() function. 
        This has an input parameter which should be tuned for the data, at WIPP use ~90"""
        #Find biggest peaks in the range (trivial peaks)
        self.initial_peaks = scipy.signal.find_peaks_cwt(self.sum_spec,numpy.arange(1,width))
        self.initial_peaks.sort()
        if self.verbose:
            print "initial_find_peaks:", self.initial_peaks

    def guess_sigmas(self,peak_pos):
        """
        Takes the sum_spec and trys to determine the peak widths (sigma) based on
        value of the half height width. Returns (Laurentz_sigma, Gauss_sigma)
        The relation for sigma to FWHM is:
        Lorentz_FWHM = sigma
        Gauss_FWHM   = 2.355*sigma
        """
        peak_height = self.sum_spec[peak_pos]
        gauss_width, lorentz_width = None, None
        for x in range(40):
            if peak_pos-x < 0: 
                break
            if self.sum_spec[peak_pos-x] < peak_height/2.:
                lorentz_width = float((x-0.5)*2.0)
                break
        for x in range(40):
            if peak_pos+x > self.nchan:
                break
            if self.sum_spec[peak_pos+x] < peak_height/2.:
                gauss_width = (x-0.5)*2.0/2.355
                break
        if None in (gauss_width,lorentz_width):
            print "guess_sigmas failed to find the peak width! Too close to the edge?"
        if self.verbose and None not in (gauss_width,lorentz_width):
            print "guess_sigmas:"
            print "Lorentz_width=%s, Gauss_width=%s"%(lorentz_width, gauss_width)
        return lorentz_width, gauss_width

    def initial_fit_peaks(self):
        """
        Fits peaks based on initial position guess (initial_find_peaks)
        Performs NLL minimization based on model of sum of gauss_lorentz
        """
        #parameter_order ['Amp','Center','Gaus','Lorentz']
        self.initial_parameters  = []
        self.initial_param_bounds = []
        #self.par_initial = []
        #self.par_bounds  = []
        for i,p in enumerate(self.initial_peaks):
            lw,gw = self.guess_sigmas(p)
            amplitude = self.sum_spec[p]
            if 'gsig' in self.esc_initials.keys():
                gsig = self.esc_initials['gsig']
            else: gsig = gw
            if 'lsig' in self.esc_initials.keys():
                lsig = self.esc_initials['lsig']
            else: lsig = lw
            self.initial_parameters += [amplitude, p, gsig, lsig]
            self.sum_spec
            self.initial_param_bounds += [(.25*p,2.*p), (p-1.,p+1.), (lw*0.5,gw*2.0),(lw*0.5,lw*2.0)]
        print 'Initials:', self.initial_parameters
        self.ret1 = scipy.optimize.minimize(self.multi_peak_nll,self.initial_parameters,method='L-BFGS-B',
                bounds=self.initial_param_bounds,tol=0.000005,options={'disp':True})
        #self.ret1 = scipy.optimize.minimize(self.multi_peak_nll,self.initial_parameters,method='BFGS',options={'disp':True})
        #self.ret1 = scipy.optimize.minimize(self.multi_peak_nll,self.initial_parameters,options={'disp':True})
    
    def linear_energy_fit(self, peak_list, alpha_list):
        """
        Fit a straight line channel = a*energy + b through the given
        (alpha isotope, peak channel) pairs.

        peak_list  -- list of peak channel numbers
        alpha_list -- list of isotope names (keys of isotopes.decays);
                      the special name 'Zero' contributes energy 0.0
        Returns (parms, chi2pdf) where parms = [a, b] from numpy.polyfit
        and chi2pdf = sum of squared channel residuals / 2.
        NOTE(review): both inputs are sorted IN PLACE (visible to the
        caller), and pairing after independent sorts assumes energies and
        channels have the same rank order -- confirm for inverted scales.
        """
        alpha_energy = []
        for alpha in alpha_list:
            if alpha == 'Zero':
                alpha_energy.append(0.0)
            else:
                alpha_energy.append(isotopes.decays[alpha][0]['energy'])
        alpha_energy.sort()
        peak_list.sort()
        alpha_energy = numpy.array(alpha_energy)
        peak_list = numpy.array(peak_list)
        parms = numpy.polyfit(alpha_energy,peak_list,1)
        # NOTE(review): the /2. normalization is a fixed divisor, not the
        # degrees of freedom -- verify if a true chi2/dof is wanted
        chi2pdf = numpy.sum((numpy.polyval(parms, alpha_energy) - peak_list) ** 2)/2.
        if self.verbose:
            print alpha_energy,peak_list
            print "Close the plot to continue!!"
            plt.plot(alpha_energy,peak_list,'o')
            plt.plot(alpha_energy,numpy.polyval(parms,alpha_energy))
            plt.title('Linear Energy Fit y=%0.3fx+%0.3f'%(parms[0],parms[1]))
            plt.xlabel('Energy (MeV)')
            plt.ylabel('Channel #')
            plt.show() 
        return parms,chi2pdf
    
    def peak_assign_manual(self, iso1, chn1, iso2, chn2): #Po210=None, Po212=None, Po214=None, Po216=None, Po218=None
        """
        This requires four arguments, (isotope1, channel1, isotope2, channel2) to set the energy scale
        Isotopes needs to be one of ['Po210','Po212','Po214','Po216','Po218']

        Sets self.energy_parms ([a,b] with channel = a*energy + b),
        self.chn_energy (energy of each channel via the inverse map),
        self.energy_parms_chi2pfd, and self.smart_peaks (predicted channel
        for every isotope in self.alpha_list).
        """
        print "User setting peak positions/energy scale:"
        # linear_energy_fit signature is (peak_list, alpha_list)
        parms,chi2pdf = self.linear_energy_fit([chn1,chn2],[iso1,iso2])
        if self.verbose: print parms,chi2pdf
        # Save parameters and calibrated channel energy array to self
        chns = numpy.arange(0,self.sum_spec.size)
        # This is just for makeing a plot
        if self.verbose:
            plt.subplot(2,1,1)
            plt.plot(self.sum_spec)
            plt.xlim(0,self.sum_spec.size)
            plt.xlabel('Channel #')
            plt.ylabel('Counts')

            plt.title('Linear Energy Fit y=%0.3fx+%0.3f'%(parms[0],parms[1]))
            plt.subplot(2,1,2)
            for i,alpha in enumerate(self.alpha_list):
                x = isotopes.decays[alpha][0]['energy']
                y = max(self.sum_spec)
                plt.text(x,y,alpha,rotation='vertical')
                plt.vlines(x,0,max(self.sum_spec),colors='g',linestyles='dashed',label=alpha)
            # invert channel = a*energy + b to plot counts vs energy
            x = (chns - parms[1])/parms[0]
            plt.plot(x,self.sum_spec)
            plt.xlim(4,10)
            plt.title('Smart Guess Peaks')
            plt.xlabel('Energy (MeV)')
            plt.ylabel('Counts')
            plt.show()
        self.energy_parms = parms
        self.chn_energy   = (chns - parms[1])/parms[0]
        self.energy_parms_chi2pfd = chi2pdf
        self.smart_peaks  = {}
        for alpha in self.alpha_list:
            print alpha,isotopes.decays[alpha][0]
            self.smart_peaks[alpha] = (int(numpy.polyval(self.energy_parms,isotopes.decays[alpha][0]['energy'])))
        print "PEAK_ASSIGN_MANUAL: going with this calibration"

    def smart_guess_peaks(self, big_peak_thresh = 0.5, med_peak_thresh = 0.05):
        """
        Tries to guess peaks based on number of peaks, and linear energy scale
        smart guess starts by assuming U238 spectrum...
        """
        parms = None

        big_peak_h = big_peak_thresh * max(self.sum_spec)
        med_peak_h = med_peak_thresh * max(self.sum_spec)
        big_peaks, med_peaks = [], []
        peaks_ordered = []
        if self.verbose: print "PeakPos\tHeight\tLorenzW\tGaussW"
        for p0 in self.initial_peaks:
            p0l,p0g = self.guess_sigmas(p0)
            if self.verbose:
                print "%d\t%d\t%s\t%s"%(p0,self.sum_spec[p0],p0l,p0g)
            if None in (p0l,p0g):
                continue
            # Sometimes the first peak in an uncropped spectrum is the betas.
            # This has a radically different shape than the other peaks
            if (p0g > 1.5*p0l and self.sum_spec[p0] > med_peak_h) or p0g > 8.:
                print "SMRT GUESS: Peak at %d likley beta! Skipping it"%p0
                continue
            peaks_ordered.append((p0,self.sum_spec[p0]))
            if self.sum_spec[p0] >= big_peak_h:
                big_peaks.append(p0)
            if self.sum_spec[p0] >= med_peak_h and self.sum_spec[p0] < big_peak_h:
                med_peaks.append((p0,self.sum_spec[p0]))
        big_peaks.sort()
        med_peaks.sort()
        peaks_ordered.sort(key=lambda k: k[1], reverse=True)
        if self.verbose: print "Big Peaks = ", big_peaks
        if self.verbose: print "Med Peaks",med_peaks
        if self.verbose: print "Peaks Ordered",peaks_ordered
        print peaks_ordered[0][0]
        
        if len(big_peaks)+len(med_peaks) >= 3:
            # Lets try taking the biggest 3 peaks and assume the spectrum is U238
            parms,chi2pdf = self.linear_energy_fit([peaks_ordered[0][0],peaks_ordered[1][0],peaks_ordered[2][0]],
                                                   ['Po210','Po218','Po214'])
            if self.verbose: print parms,chi2pdf

        if len(big_peaks)+len(med_peaks) == 2:
            # Lets assume the peaks are Po214 and Po218 I guess
            parms,chi2pdf = self.linear_energy_fit([peaks_ordered[0][0],peaks_ordered[1][0]],['Po218','Po214'])
            if self.verbose: print parms,chi2pdf

        if parms == None:
            print "SMRT GUESS is not smart enough to figure this out, yet"

        print "Parms",parms
        if parms != None: # and chi2pdf < 1:
            print "SMRT GUESS: going with this calibration"
            chns = numpy.arange(0,self.sum_spec.size)
            # This is just for makeing a plot
            if self.verbose:
                plt.subplot(2,1,1)
                plt.plot(self.sum_spec)
                plt.xlim(0,self.sum_spec.size)
                plt.xlabel('Channel #')
                plt.ylabel('Counts')

                plt.title('Linear Energy Fit y=%0.3fx+%0.3f'%(parms[0],parms[1]))
                plt.subplot(2,1,2)
                for i,alpha in enumerate(self.alpha_list):
                    x = isotopes.decays[alpha][0]['energy']
                    y = max(self.sum_spec)
                    plt.text(x,y,alpha,rotation='vertical')
                    plt.vlines(x,0,max(self.sum_spec),colors='g',linestyles='dashed',label=alpha)
                x = (chns - parms[1])/parms[0]
                plt.plot(x,self.sum_spec)
                plt.xlim(4,10)
                plt.title('Smart Guess Peaks')
                plt.xlabel('Energy (MeV)')
                plt.ylabel('Counts')
                plt.show()
                
            # Save parameters and calibrated channel energy array to self
            self.energy_parms = parms
            self.chn_energy   = (chns - parms[1])/parms[0]
            self.energy_parms_chi2pfd = chi2pdf
            self.smart_peaks  = {}
            for alpha in self.alpha_list:
                #print alpha,isotopes.decays[alpha][0]
                self.smart_peaks[alpha] = (int(numpy.polyval(self.energy_parms,isotopes.decays[alpha][0]['energy'])))

    def smart_fit_peaks(self):
        """
        Rather than fit all the peaks which initial_find_peaks finds, use the smart_guess_peaks
        to fit the peaks that correspond to alphas

        Requires self.smart_peaks (from smart_guess_peaks or
        peak_assign_manual). Fills self.smart_fit_result (scipy
        OptimizeResult), self.alpha_fit_parameters (per-alpha
        [Amp, Center, Gauss_sigma, Lorentz_sigma]) and self.peak_pdfs
        (per-alpha model curve with small tails zeroed).
        """
        ## TODO: THE RESOLUTION IS PROBABLY NOT INDEPENDENT! Make methos w/ single resolution parm?
        alpha_peak_chns = []
        self.alpha_fit_parameters = {} # Try to orginize the fit into a list of dict objects
        for alpha in self.alpha_list:  #TODO: THIS FITS ALL ALPHAS?!?!
            print isotopes.decays[alpha][0]
            #alpha_peak_chns.append(int(numpy.polyval(self.energy_parms,decays[alpha][0]['energy'])))
            self.alpha_fit_parameters[alpha] = []
        #print "Chn peaks:",alpha_peak_chns
        self.smart_parameters  = []
        self.smart_param_bounds = []
        #for i,p in enumerate(alpha_peak_chns):
        # Build the flat [Amp, Center, Gauss, Lorentz]*N start vector and
        # the matching per-parameter bounds
        for alpha in self.alpha_list:
            peak_chn = self.smart_peaks[alpha]
            lw,gw = self.guess_sigmas(peak_chn)
            amplitude = self.sum_spec[peak_chn]
            self.smart_parameters += [amplitude, peak_chn, gw, lw]
            #self.sum_spec
            self.smart_param_bounds += [(amplitude*0.5,amplitude*1.25), (peak_chn-1.,peak_chn+1.), (gw*0.5,gw*2.),(lw*0.5,lw*2.0)]
        print 'Initials:', self.smart_parameters
    
        # SCIPY Method
        self.smart_fit_result = scipy.optimize.minimize(self.multi_peak_nll,self.smart_parameters,method='L-BFGS-B',
                bounds=self.smart_param_bounds,tol=0.0005,options={'disp':True})
        
        #print self.smart_fit_result
        # Unpack the flat result vector back into per-alpha 4-tuples
        for i,alpha in enumerate(self.alpha_list):
            self.alpha_fit_parameters[alpha] = self.smart_fit_result.x[i*4:i*4+4]
            print "initial:",self.smart_parameters[i*4:i*4+4]
            print "Fit:    ",self.smart_fit_result.x[i*4:i*4+4]
        
        # Make PDF of peaks
        if self.verbose: print "Making PDF's of fit peaks"
        self.peak_pdfs = {}
        for ia,alpha in enumerate(self.alpha_fit_parameters.keys()):
            self.peak_pdfs[alpha] = self.fitfun_gauss_lorentz(self.alpha_fit_parameters[alpha])

        # We dont want to use the tails too much in assigning peaks, so small tails are zeroed
        for alpha in self.peak_pdfs.keys():
            pdf = self.peak_pdfs[alpha]
            pdf[pdf/pdf.max() < 1.e-4] = 0
            self.peak_pdfs[alpha] = pdf
             
        if self.verbose:
            print "SMRT FIT PARAMETERS:"
            print self.smart_fit_result
            plt.plot(self.sum_spec)
            leg = []
            for alpha in self.peak_pdfs.keys():
                plt.plot(self.peak_pdfs[alpha])
                leg.append(alpha)
            plt.legend(['data']+leg)
            plt.title('Smart Fit Peaks')
            plt.xlabel('Channel #')
            plt.yscale('log')
            plt.show()

    def fit_peaks_lmfit(self, float_all = False):
        """
        Fits peak shape based on initial alpha peak position guesses.
        Uses LMFIT package to improve the way variables are correlated in fit. 
        LMFIT uses least-squares fitting technique. This function needs peak
        assignment to be done before it can run (uses self.smart_peaks attribute).
        
        float_all=<bool> (optional) determines if each peak get to float the gauss/lorentz widths.

        Fills self.peak_fit_parms (per-alpha [p0, A0, gw, lw]) and
        self.peak_pdfs (per-alpha model curve).
        NOTE(review): fitted values are read back from peak_fit_parms, which
        assumes lmfit.minimize mutates the Parameters in place (old lmfit
        API); with lmfit >= 0.9 the results live in result.params -- verify
        against the installed lmfit version.
        """
        method_start_time = datetime.datetime.now()
        # NOTE: This function added crap to multi_peak_spec, so if you delete this
        # method, clean that up too!!
        # Trying to get a good idea of peak widths for initial values
        val_gaus = []
        val_laur = []
        val_height = []
        for alpha in self.alpha_list:
            lw, gw = self.guess_sigmas(self.smart_peaks[alpha])
            # fall back to fixed widths when the edge search failed
            if None in (lw,gw): lw,gw = 10.,2.
            val_gaus.append(gw)
            val_laur.append(lw)
            val_height.append(self.sum_spec[self.smart_peaks[alpha]])
        # Seed all peaks with the widths measured at the tallest peak
        gaus = val_gaus[val_height.index(max(val_height))]
        laur = val_laur[val_height.index(max(val_height))]
        if self.verbose:
            print "  Max peak alpha = %s"%self.alpha_list[val_height.index(max(val_height))]
            print "  Gaussian Width = %.2f"%gaus
            print "  Laurentzian w  = %.2f"%laur
        peak_fit_parms = lmfit.Parameters()
        for alpha in self.alpha_list:
            val_p0 = self.smart_peaks[alpha]
            # keep the center within +/-5 channels of the guess, inside [0,1024]
            min_p0 = max([0, val_p0-5])
            max_p0 = min([1024, val_p0+5])
            val_A0 = self.sum_spec[self.smart_peaks[alpha]]
            peak_fit_parms.add('%s_p0'%alpha, vary=True, value = val_p0, min=min_p0, max=max_p0)
            peak_fit_parms.add('%s_A0'%alpha, vary=True, value = val_A0, min = 0, max = max([val_A0*1.5,100]))
            if float_all == True:
                peak_fit_parms.add('%s_gw'%alpha, vary=True, value = gaus,   min = 1,  max = gaus*2.)
                peak_fit_parms.add('%s_lw'%alpha, vary=True, value = laur,   min = 1,  max = laur*2.)
        if float_all == False:
            # single shared resolution for all peaks
            peak_fit_parms.add('gw', vary=True, value = gaus,   min = 1,  max = gaus*2.)
            peak_fit_parms.add('lw', vary=True, value = laur,   min = 1,  max = laur*2.)
        # The residual function
        resid = lambda parms: (self.sum_spec - self.multi_peak_spec(parms))**2
        # The fitting call
        result = lmfit.minimize(resid, peak_fit_parms)
        self.peak_fit_parms = {}
        for alpha in self.alpha_list:
            if float_all == True:
                self.peak_fit_parms[alpha] = [
                    peak_fit_parms['%s_p0'%alpha].value,
                    peak_fit_parms['%s_A0'%alpha].value,
                    peak_fit_parms['%s_gw'%alpha].value,
                    peak_fit_parms['%s_lw'%alpha].value]
            else:
                self.peak_fit_parms[alpha] = [
                    peak_fit_parms['%s_p0'%alpha].value,
                    peak_fit_parms['%s_A0'%alpha].value,
                    peak_fit_parms['gw'].value,
                    peak_fit_parms['lw'].value]
        self.peak_pdfs = {}
        # fitfun_gauss_lorentz expects [A0, p0, gw, lw] ordering
        for alpha in self.alpha_list:
            if 'gw' in peak_fit_parms.keys() and 'lw' in peak_fit_parms.keys():
                peak_par = [peak_fit_parms['%s_A0'%alpha].value, peak_fit_parms['%s_p0'%alpha].value,
                            peak_fit_parms['gw'].value, peak_fit_parms['lw'].value]
            else:
                peak_par = [peak_fit_parms['%s_A0'%alpha].value, peak_fit_parms['%s_p0'%alpha].value,
                            peak_fit_parms['%s_gw'%alpha].value, peak_fit_parms['%s_lw'%alpha].value]
            self.peak_pdfs[alpha] = self.fitfun_gauss_lorentz(peak_par)
        method_end_time = datetime.datetime.now()
        print "fit_peaks_lmfit took %d sec"%(method_end_time - method_start_time).total_seconds()
        # Verbose output plus plots
        if self.verbose:
            print "fit_peaks_lmfit results:"
            lmfit.report_fit(peak_fit_parms)
            # make fit result subplot
            fig = plt.figure(figsize=(10,10))
            f_sum = plt.subplot(211)
            f_sum.plot(self.chn_energy, self.sum_spec, label='sum spec')
            f_sum.plot(self.chn_energy, self.multi_peak_spec(peak_fit_parms), 'k.', alpha=0.4, label='sum fit')
            for alpha in self.alpha_list:
                f_sum.plot(self.chn_energy, self.peak_pdfs[alpha], label='%s fit'%alpha)
            f_sum.set_xlim(4,10)
            f_sum.set_yscale('log')
            f_sum.set_title('Peak Fits')
            f_sum.set_ylim(bottom=1e-2)
            f_sum.grid()
            plt.legend(loc=1, borderaxespad=0.)
            # make residual subplot
            f_res = plt.subplot(212)
            f_res.plot(self.chn_energy, self.sum_spec - self.multi_peak_spec(peak_fit_parms))
            f_res.set_xlim(4,10)
            f_res.set_ylabel('Counts')
            f_res.set_title('Residule')
            f_res.grid()
            #final = self.sum_spec + result.residual
            #plt.plot(self.chn_energy,self.sum_spec)
            #plt.plot(self.chn_energy,final)
            plt.show()
        
    def brute_force_peaks(self,sig = None, force_fit_zero = False):
        """
        Here we take a brute force approach to find what kind of spectrum it can be
        Take all permutations of significant alpha emitters on the diode and try to fit them

        sig            -- peak-height significance threshold as a fraction of
                          the spectrum maximum (default 0.1)
        force_fit_zero -- also anchor the fit with a (0,0) point / 'Zero' alpha
        Prints the best (lowest chi2) one or two assignments found.
        """
        # THIS IS NOT BEING USED RIGHT NOW
        import itertools
        #alpha_list = ['Po210','Po214','Po218','Po216','Po212'] #TODO finish adding chains
        results = []
        if sig == None: sig = 0.1
        significant_peaks = []
        for p0 in self.initial_peaks:
            if self.sum_spec[p0] >= sig*max(self.sum_spec):
                significant_peaks.append((p0,self.sum_spec[p0]))
        print "brute_force_peaks",significant_peaks
        alpha_permutations = itertools.combinations(self.alpha_list,len(significant_peaks))
    
        if len(significant_peaks) < 3 or force_fit_zero:
            significant_peaks.append((0,0))
            significant_peaks.sort(key=lambda k: k[0])
            force_fit_zero = True

        for permute in alpha_permutations:
            alphas = list(permute[:])
            if force_fit_zero:
                alphas.append('Zero')
            #print alphas,significant_peaks
            # NOTE(review): significant_peaks holds (channel,height) tuples
            # but linear_energy_fit expects a list of bare channel numbers --
            # verify before reviving this unused method
            parms,chi2pdf = self.linear_energy_fit(significant_peaks,alphas)
            results.append((chi2pdf,parms))
        # lowest chi2 first
        results.sort(key=lambda k: k[0])
        print results[0]
        if len(results)>=2:
            print results[1]
        
    def compute_alpha_counts(self):
        """Get number of counts for each of the peaks for each time spectra

        Each channel's interval counts are split between the alphas in
        proportion to the fitted peak PDFs (self.peak_pdfs) at that channel.
        Fills self.alpha_counts[alpha][ispec] (counts per interval) and
        self.alpha_cpd[alpha][ispec] (counts per livetime-day).
        """
        self.alpha_counts = {}
        self.alpha_cpd = {}
        print "Computing Alpha Counts"
        # Start assigning channels to peaks
        #self.chan_pdf = numpy.zeros((self.sum_spec.size,len(self.alpha_list)))
        sum_pdfs = numpy.zeros(self.sum_spec.size)
        for alpha in self.peak_pdfs.keys():
            sum_pdfs += self.peak_pdfs[alpha]
            self.alpha_counts[alpha] = numpy.zeros(len(self.idata))
            self.alpha_cpd[alpha] = numpy.zeros(len(self.idata))
        
        # Apportion each interval spectrum channel-by-channel; channels with
        # no counts or no overlapping peak PDF are skipped
        for ispec, spec in enumerate(self.idata):
            for ichan,channel in enumerate(self.sum_spec):
                if spec[ichan] == 0: continue
                if sum_pdfs[ichan] == 0: continue   
                for i,alpha in enumerate(self.peak_pdfs.keys()):
                    self.alpha_counts[alpha][ispec] += (spec[ichan] * self.peak_pdfs[alpha][ichan]) / sum_pdfs[ichan]
    
        # Compute count rate in counts per day (CPD)
        for alpha in self.alpha_counts.keys():
            for i,counts in enumerate(self.alpha_counts[alpha]):
                self.alpha_cpd[alpha][i] = self.alpha_counts[alpha][i] / (self.ilivetime[i]/(3600.*24.))
    
        if self.verbose:
            # Make plot of alpha counts vs specta file
            leg = []
            for alpha in self.alpha_counts.keys():
                if alpha == 'Po210': continue
                print alpha, ": ", self.alpha_counts[alpha].sum()
                # x errors span the interval; y errors are a 68.2% Poisson interval
                xerror = numpy.array((numpy.array(self.datetime) - numpy.array(self.istarttime),
                                      numpy.array(self.iendtime) - numpy.array(self.datetime) ))
                yerror = numpy.zeros((2,self.inum))
                tmp = scipy.stats.poisson.interval(0.682,self.alpha_counts[alpha])
                yerror[0,:] = numpy.array(self.alpha_counts[alpha]) - tmp[0]
                yerror[1,:] = tmp[1] - numpy.array(self.alpha_counts[alpha])
                
                plt.errorbar(self.datetime,self.alpha_counts[alpha],xerr=xerror,yerr=yerror,fmt='o')
                leg.append(alpha)
            leg_obj = plt.legend(leg)
            leg_obj.get_frame().set_alpha(0.5)
            plt.gca().xaxis.set_major_formatter(dates.DateFormatter("%m/%d %H:%M"))
            plt.ylabel('Counts')
            plt.xlabel('Time')
            plt.gcf().autofmt_xdate()
            print "Close plot to continue..."
            plt.show()

    def get_decay_chains(self):
        """
        This code just assembles the decay chain information needed by the fitting model.
        Since there are no dater dependencies, this can be run in _init_ (and is).
        After running this self.decay_chain_U and self.decay_chain_Th are available.
        These are lists of dictionary objects, describing the main decay path, T12, alpha_energy, etc...
        """
        # Uranium chain:
        chain_start = 'Ra226'
        chain_end   = 'Po214'
        U_chain = [isotopes.decays[chain_start][0]]
        U_chain[-1]['decay'] = chain_start
        while True:
            U_chain.append(isotopes.decays[U_chain[-1]['daughter']][0])
            U_chain[-1]['decay'] = U_chain[-2]['daughter']
            if U_chain[-1]['decay'] == chain_end:
                break
        # Thorium chain:
        chain_start = 'Th228'
        chain_end   = 'Po212'
        Th_chain = [isotopes.decays[chain_start][0]]
        Th_chain[-1]['decay'] = chain_start
        while True:
            Th_chain.append(isotopes.decays[Th_chain[-1]['daughter']][0])
            Th_chain[-1]['decay'] = Th_chain[-2]['daughter']
            if Th_chain[-1]['decay'] == chain_end:
                break
        if self.verbose:
            print 'Uranium decay_chain='
            for i in U_chain:
                print i
            print 'Thorium decay_chain='
            for i in Th_chain:
                print i
        self.decay_chain_U = U_chain
        self.decay_chain_Th = Th_chain
    
    def initial_conditions_estimate_U(self):
        """
        Guesses the initial concentrations of Rn222, Ra226 based on alpha counts without fitting.
        Alpha counting needs to be performed before this step!
        This uses a guess of count_eff_U to relate data to the number of actual atoms in the system.

        Returns:
            numpy array N0 with one entry per isotope in self.decay_chain_U:
            the estimated initial number of atoms. Only the Ra226 and Rn222
            slots are filled; negative fit amplitudes are left at zero.
        """
        # Fit a naive function of exponential decay and exponential growth p1*exp(-lt)+p2(1-exp(-lt))
        # Both exponentials use the Rn222 half-life: the Po218 rate decays with
        # its parent Rn222 (amplitude Rn0) and grows back in from Ra226 support (Ra0).
        fun = lambda t,Rn0,Ra0: Rn0*numpy.exp(-t*ln2/isotopes.decays['Rn222'][0]['T12']) + Ra0*(1-numpy.exp(-t*ln2/isotopes.decays['Rn222'][0]['T12']))
        popt,pcov = scipy.optimize.curve_fit(fun, self.elapsed_time_sec, self.alpha_cpd['Po218'])
        # Orginize the return into an array of inital values
        N0 = numpy.zeros(len(self.decay_chain_U))
        for i,alpha in enumerate(self.decay_chain_U):
            if alpha['decay'] == 'Rn222':
                if popt[0]>0:
                    # counts/day -> counts/sec, then atoms via N = rate/lambda
                    N0[i] = popt[0]/(3600.*24.) / (ln2/alpha['T12'])
            if alpha['decay'] == 'Ra226':
                if popt[1]>0:
                    N0[i] = popt[1]/(3600.*24.) / (ln2/alpha['T12'])
        # Tons of verbose output !!!!!!
        if self.verbose:
            print "initial_conditions_estimate_U:"
            print " Fit Rn0*exp(-t/tau)+Ra0*(1-exp(-t/tau) to Po218"
            print "  Rn0 = %.2f dpd"%popt[0]
            print "  Ra0 = %.2f dpd"%popt[1]
            print " bin_width ~ %d sec"%self.irealtime[0]
            print " CovMatrix = ", pcov
            if min(popt)<0:
                # (negative amplitudes were simply never copied into N0 above)
                print " Setting negative fit values to zero"
            print " Initial guess populations is (in atoms):"
            for i,alpha in enumerate(self.decay_chain_U):
                print "   %s: %d,%.2e dpd"%(alpha['decay'],N0[i],N0[i]*ln2/(isotopes.decays[alpha['decay']][0]['T12']/3600./24.))
            leg = []
            # Compare the guess against the full chain model, isotope by isotope
            model = self.U_chain_rate(N0)
            chi2 = {}
            for alpha in self.alpha_counts.keys():
                if alpha in ['Po210','Po212','Po216']: continue
                # find this isotope's row index in the model output
                for i,a in enumerate(self.decay_chain_U):
                    if a['decay'] == alpha:
                        ialpha = i
                        break
                # asymmetric 68.2% Poisson errors on the measured rates
                yerror = numpy.zeros((2,self.inum))
                tmp = scipy.stats.poisson.interval(0.682,self.alpha_cpd[alpha])
                yerror[0,:] = numpy.array(self.alpha_cpd[alpha]) - tmp[0]
                yerror[1,:] = tmp[1] - numpy.array(self.alpha_cpd[alpha])
                plt.errorbar(self.elapsed_time_sec, self.alpha_cpd[alpha], yerr=yerror, fmt='o', label='%s data'%alpha)
                leg.append('%s data'%alpha)
                # model is counts/interval; convert to counts/day for the overlay
                plt.plot(self.elapsed_time_sec, model[ialpha]*3600.*24./self.ilivetime, label='%s model'%alpha)
                leg.append('%s fit'%alpha)
                chi2[alpha] = (self.alpha_counts[alpha] - model[ialpha])**2. / model[ialpha]
            print "Close plot to continue"
            leg = plt.legend(loc=1, borderaxespad=0.)
            leg.get_frame().set_alpha(0.5)
            plt.xlabel("Elapsed Time (sec)")
            plt.ylabel("Counts Per Day")
            plt.title('Initial estimate (U-chain) [A1*exp(-t/tau)+A2(1-exp(-t/tau))]')
            plt.show()
            print "Calculating Chi^2 of estimate:"
            for k in chi2.keys():
                print '  %s Chi2 = %.2f/(%d-1) = %.2f'%(k,numpy.sum(chi2[k]),len(chi2[k]), numpy.sum(chi2[k])/(len(chi2[k])-1))
        return N0
        
    def fit_single_nll(self,alpha,profile=False):
        """
        Fits a single alpha over time using minimize NLL method.
        Specify alpha as a string: 'Po218', etc
        """
        ## Does alpha_counts exist? If not no point in continuing...
        #if not hasattr(self,'alpha_counts'):
            #print "You need to assign and count alpha_counts before fitting"
            #return 0
        for i,a in enumerate(self.decay_chain_U):
            if a['decay'] == alpha:
                ialpha = i
                if self.verbose: print "%s has index %d"%(alpha,i)
                break
        
        N0_initial = self.initial_conditions_estimate_U()
        N0_bounds  = []
        for i in N0_initial:
            N0_bounds.append((0,i*3.))
        
        print "Before LL calc...", datetime.datetime.now().strftime('%H:%M:%S')
        print "Alpha counts=",numpy.rint(self.alpha_counts[alpha]).astype(int)
        tmp = self.U_chain_rate(N0_initial)
        print "Initial Model Counts=",tmp
        nll = lambda N0: -numpy.sum( -scipy.stats.poisson.logpmf(numpy.rint(self.alpha_counts[alpha]).astype(int), self.U_chain_rate(N0)[ialpha]) )
        res = scipy.optimize.minimize(nll, N0_initial, bounds=N0_bounds, method='TNC')#, options={'iprint':0})
        print "NLL minimization results = ", res
        print "After LL calc...", datetime.datetime.now().strftime('%H:%M:%S')
        
        # 1D profile scan of result
        if profile in self.alpha_list:
            # Find the ingex of the alpha we're profiling
            for i,a in enumerate(self.decay_chain_U):
                if a['decay'] == profile:
                    palpha = i
                    if self.verbose: print "Profiling over %s w/ index %d"%(profile,i)
                    break
            # Make profile
            if res.x[palpha] > 100:
                profile_var = numpy.linspace(res.x[palpha]*0.9,res.x[palpha]*1.1,30)
            else:
                profile_var = numpy.linspace(0,res.x[palpha]*2.,30)
            profile_nll = numpy.zeros(profile_var.size)
            profile_N0 = res.x
            for i,x in enumerate(profile_var):
                profile_N0[palpha] = x
                profile_nll[i] = nll(profile_N0)
            print profile_var
            print profile_nll
            plt.plot( profile_var, max(profile_nll) - profile_nll, 'o')
            plt.title('Profile of %s'%profile)
            plt.ylabel('NLL - max(NLL)')
            plt.xlabel('Initial %s atoms'%profile)
            print "Close plot to continue..."
            plt.show()
        # 2D scan is done by specifying the two alphas to profile over in a list 
        if type(profile) == type([]):
            # Find the ingex of the alpha we're profiling
            n = 10
            for i,a in enumerate(self.decay_chain_U):
                if a['decay'] == profile[0]:
                    palpha1 = i
                    if self.verbose: print "Profiling over %s w/ index %d"%(profile[0],i)
                    break
            for i,a in enumerate(self.decay_chain_U):
                if a['decay'] == profile[1]:
                    palpha2 = i
                    if self.verbose: print "Profiling over %s w/ index %d"%(profile[1],i)
                    break
            profile_var1 = numpy.linspace(res.x[palpha1]*0.8,res.x[palpha1]*1.2,n)
            #profile_var2 = numpy.linspace(res.x[palpha2]*0.9,res.x[palpha2]*1.1,n)
            profile_var2 = numpy.linspace(-10,10,n)
            print "profile1 %d-%d"%(res.x[palpha1]*0.9,res.x[palpha1]*1.1)
            print "profile2 %d-%d"%(res.x[palpha2]*0.9,res.x[palpha2]*1.1)
            profile_nll = numpy.zeros((n,n))
            profile_N0 = res.x
            for i,x in enumerate(profile_var1):
                profile_N0[palpha1] = x
                for j,y in enumerate(profile_var2):
                    profile_N0[palpha2] = y
                    profile_nll[i,j] = nll(profile_N0)
            profile_nll = profile_nll.max() - profile_nll
            print profile_nll
            plt.pcolormesh(profile_var1,profile_var2,profile_nll,cmap='jet')
            plt.colorbar()
            plt.show()

    def chi2_parms(self, parms):
        """
        Residual function handed to lmfit.minimize.

        Returns the per-bin normalized residuals (model - data)/sqrt(data) for
        the Po214 and Po218 alpha counts, concatenated into one array; lmfit
        squares and sums these internally, which is the chi^2 being minimized.
        parms: lmfit.Parameters (any mapping of isotope name -> obj with .value).
        """
        # Unpack initial populations in chain order; isotopes absent from
        # parms are held at zero atoms.
        N0 = [parms[link['decay']].value if link['decay'] in parms.keys() else 0
              for link in self.decay_chain_U]
        # Predicted decays per interval from the chain model
        model = self.U_chain_rate(N0)
        residuals = numpy.array([])
        for alpha in ['Po214','Po218']:
            # locate this isotope's row in the model output
            for idx, link in enumerate(self.decay_chain_U):
                if link['decay'] == alpha:
                    ialpha = idx
                    break
            residuals = numpy.append(residuals,
                                     (model[ialpha] - self.alpha_counts[alpha]) / (self.alpha_counts[alpha])**.5)
        return residuals
        
    def get_U_chain_matrix(self):
        """
        This just grabs data from isotopes table about the U chain for the diff'e'qs
        Adds attribute used in modeling U chain. 
        Runs in __init__ so since this is all constants.
        """
        A = numpy.zeros((len(self.decay_chain_U),len(self.decay_chain_U)))
        decay_chain_lambdas = numpy.zeros((len(self.decay_chain_U),1))
        for i in range(len(self.decay_chain_U)):
            A[i,i] = -1*ln2/self.decay_chain_U[i]['T12'] # 1/sec
            decay_chain_lambdas[i] = ln2/self.decay_chain_U[i]['T12']
            if i+1<len(self.decay_chain_U):
                A[i+1,i] = ln2/self.decay_chain_U[i]['T12']
            if self.verbose>1:
                print "%s: t12=%.2e sec, lambda=%.2e 1/sec"%(self.decay_chain_U[i]['decay'],self.decay_chain_U[i]['T12'],decay_chain_lambdas[i])
        # Get eigenvalues and vectors
        evalue,evector = numpy.linalg.eig(A)
        # Verbose Printout shows matrix A and eigenvalue/vector pairs
        if self.verbose>1:
            print "A="
            self.print_mat(A)
            print "\n"
            print "Eigenvalue: Eigenvector"
            for i in range(len(evalue)):
                print '%d:'%i, evalue[i], evector[:,i]
            print "\n"
            print "V="
            self.print_mat(evector)
            print "V^{-1}="
            self.print_mat(numpy.linalg.inv(evector))
        self.U_chain_evector = evector
        self.U_chain_evalue  = evalue
        self.U_chain_lambdas = decay_chain_lambdas
        
    def U_chain_rate(self,N0):
        """
        Models the decay chain of Uranium. This returns the number of decays
        for each isotope in the chain for each interval: ndecays[iso,interval].
        General solution to the problem is:
          [dN/dt] = [A][N] describes the chains
          [N(t)] = [V][Lambda][V^-1][N0]
          [V] = eigenvectors matrix
          [Lambda] = matrix.diag(exp(eigenvalues))
          decays = lambda * integral(N(t),t0,tf)
        N0: initial atom populations, one per isotope in self.decay_chain_U.
        """
        #TODO: maybe add optional time_array if you want to use not self.datetime
        # We use self.decay_chain_U to construct the matrix A, this is built in init with get_decay_chains
        # PERF: V^-1 is constant, so invert once here; the original re-inverted
        # it inside Nt for every one of the 50 integration steps per interval.
        V = self.U_chain_evector
        Vinv = numpy.linalg.inv(V)
        Nt = lambda t,n0: numpy.dot(numpy.dot(numpy.dot(V, numpy.diag(numpy.exp(self.U_chain_evalue*t))), Vinv),n0)
        # This integrates the number of atoms in an interval so we can get the decay rate (multiply by lambda)
        # JF translation: Total number of decays in the interval
        ndecays = numpy.zeros((len(self.decay_chain_U),self.inum))
        # Loop over intervals (individual spectra)
        #TODO: why integrate over realtime!
        for i in range(self.inum):
            it0 = ( self.istarttime[i] - self.runstarttime ).total_seconds()
            itf = ( self.iendtime[i]   - self.runstarttime ).total_seconds()
            # Left Riemann sum with 50 points across the interval
            ita,istep = numpy.linspace(it0,itf,num=50,endpoint=False,retstep=True)
            for t in ita:
                ndecays[:,i] += Nt(t,N0)*istep
        ndecays = ndecays * self.U_chain_lambdas #TODO: Fix the way livetime is handled
        if self.verbose>1: print("ndecays= %s" % ndecays)
        return ndecays

    def fit_U_chain(self, custom_eff = None):
        """
        Fit on U chain, with floating parameters for Ra226, Rn222, Po218, Pb214, Bi214.
        If no optional arguments are given this fits the counts only.
        custom_eff = tuple of efficiencies to use for (drift, charge_fraction, diode)
        if you want to float any of these values then append boolians in same order, 
        so len(custom_eff) must be either 3 or 6. 
        """
        # Calculate the initial values from a simple model
        N0_initial = self.initial_conditions_estimate_U()
        # Initialize lmfit parameters
        parms = lmfit.Parameters()
        for i,a in enumerate(self.decay_chain_U):
            if a['decay'] in ['Po214','Pb210']:
                continue
                #parms.add(a['decay'], value=0, vary=False, min=0.)
            else:
                parms.add(a['decay'], value=N0_initial[i], vary=True, min=0.)
        if custom_eff != None:
            if len(custom_eff) == 3:
                parms.add('drift_eff', value = custom_eff[0], vary=False)
                parms.add('charge_eff', value = custom_eff[1], vary=False)
                parms.add('diode_eff', value = custom_eff[2], vary=False)
            elif len(custom_eff) == 6:
                parms.add('drift_eff', value = custom_eff[0], vary = custom_eff[3], min = .01)
                parms.add('charge_eff', value = custom_eff[0], vary = custom_eff[4], min = .01)
                parms.add('diode_eff', value = custom_eff[0], vary = custom_eff[5], min = .01)
            else:
                print "unknown parameter: ", custom_eff
        result = lmfit.minimize(self.chi2_parms, parms)
        #final = self.alpha_counts[alpha] + result.residual
        lmfit.report_fit(parms)

    def fit_U_chain_iminuit_NLL(self, fit_full_chain = False, float_drift_eff = False, print_level=0):
        """
        This uses iminuit to minimize the NLL defined by:
        NLL = sum( (model_Po218 + model_Po214) - ( data_Po218*ln(model_Po218) + data_Po214*ln(model_Po214) ))
        We get the initial values of Rn and Ra from self.initial_conditions_estimate_U().
        Values are limited such that no value can be negative (but to do this upper limits were also necessary)
        Ra = [0,1e20], remaining limts are=[0,1e9].
        The NLL function is self.nll_U_chain_iminuit(Ra226_N0, Rn222_N0, Po218_N0, Pb214_N0, Po214_N0):

        fit_full_chain = False: float only Ra226/Rn222; True: also float Po218/Pb214/Bi214.
        float_drift_eff = True: additionally float a Po218 collection factor 'coll_fac'.
        Results are stored on the self.fit_{top,full}_U_iminuit_nll* attributes;
        which set depends on the flag combination.
        NOTE(review): the limit_*/error_* keyword style, the errordef argument,
        and migrad() returning a (fmin, params) pair are the pre-2.0 iminuit
        API -- confirm the pinned iminuit version before upgrading.
        """
        # NOTE: errordef=0.5 for NLL fitting, and errordef=1 (default) for chi2
        import iminuit
        N0_initial = self.initial_conditions_estimate_U()
        # NOTE(review): these two tuples are never used below -- the limits are
        # re-stated inline in every Minuit(...) call.
        lim_Ra226_N0 = (0.,1.e19)
        lim_Rn222_N0 = (0.,1.e9)
        if fit_full_chain == False:
            if float_drift_eff == False:
                # Top of chain only: Ra226 and Rn222 float
                m = iminuit.Minuit(self.nll_top_U_chain_iminuit, Ra226_N0 = N0_initial[0], Rn222_N0 = N0_initial[1], limit_Rn222_N0=(0,1e9), limit_Ra226_N0=(0,1e19), print_level=print_level, errordef=0.5)
                m_out, m_param = m.migrad()
                self.fit_top_U_iminuit_nll = m.values
                self.fit_top_U_iminuit_nll_out = m_out
                self.fit_top_U_iminuit_nll_param = m_param
            else: 
                # Top of chain plus the Po218 collection factor
                m = iminuit.Minuit(self.nll_top_U_chain_iminuit_floatdrift, Ra226_N0 = N0_initial[0], Rn222_N0 = N0_initial[1], coll_fac=1., limit_Rn222_N0=(0,1e9), limit_Ra226_N0=(0,1e19), limit_coll_fac=(0.,1.), error_coll_fac=0.1, print_level=print_level, errordef=.5)
                m_out, m_param = m.migrad()
                self.fit_top_U_iminuit_nll_collfac = m.values
                self.fit_top_U_iminuit_nll_collfac_out = m_out
                self.fit_top_U_iminuit_nll_collfac_param = m_param
        elif fit_full_chain == True:
            # NOTE(review): these three tuples are likewise unused (limits inline below)
            lim_Po218_N0 = (0.,1.e9)
            lim_Pb214_N0 = (0.,1.e9)
            lim_Bi214_N0 = (0.,1.e9)
            if float_drift_eff == False:
                m = iminuit.Minuit(self.nll_full_U_chain_iminuit, Ra226_N0 = N0_initial[0], Rn222_N0 = N0_initial[1], limit_Rn222_N0=(0,1e9), limit_Ra226_N0=(0,1e19), limit_Po218_N0=(0,1e9), limit_Pb214_N0=(0,1e9), limit_Bi214_N0=(0,1e9), print_level=print_level, errordef=0.5)
                m_out, m_param = m.migrad()
                self.fit_full_U_iminuit_nll = m.values
                self.fit_full_U_iminuit_nll_out = m_out
                self.fit_full_U_iminuit_nll_param = m_param
            else: 
                m = iminuit.Minuit(self.nll_full_U_chain_iminuit_floatdrift, Ra226_N0 = N0_initial[0], Rn222_N0 = N0_initial[1], coll_fac=1., limit_Rn222_N0=(0,1e9), limit_Ra226_N0=(0,1e19), limit_Po218_N0=(0,1e9), limit_Pb214_N0=(0,1e9), limit_Bi214_N0=(0,1e9), limit_coll_fac=(0.,1.), error_coll_fac=0.1, print_level=print_level, errordef=0.5)
                m_out, m_param = m.migrad()
                self.fit_full_U_iminuit_nll_collfac = m.values
                self.fit_full_U_iminuit_nll_collfac_out = m_out
                self.fit_full_U_iminuit_nll_collfac_param = m_param
        if self.verbose:
            print m_out, m_param
            print m.values, m.parameters, m.args
            print 'parameters=', m.parameters
            print "fit_U_chain_iminuit_NLL:"
            print "  Rn0 = %.2f dpd"%m.values['Rn222_N0']
            print "  Ra0 = %.2f dpd"%m.values['Ra226_N0']
            # Collect the fitted populations back into chain order; isotopes
            # that were not fit parameters stay at zero atoms.
            N0 = numpy.zeros(len(self.decay_chain_U))
            for i,alpha in enumerate(self.decay_chain_U):
                if alpha['decay']+'_N0' in m.parameters:
                    N0[i] = m.values[alpha['decay']+'_N0']
            print " Fit populations is (in atoms):"
            for i,alpha in enumerate(self.decay_chain_U):
                print "   %s: %d,%.2e dpd"%(alpha['decay'],N0[i],N0[i]*ln2/(isotopes.decays[alpha['decay']][0]['T12']/3600./24.))
            leg = []
            # Overlay data and model, isotope by isotope
            model = self.U_chain_rate(N0)
            chi2 = {}
            for alpha in self.alpha_counts.keys():
                if alpha in ['Po210','Po212','Po216']: continue
                # find this isotope's row index in the model output
                for i,a in enumerate(self.decay_chain_U):
                    if a['decay'] == alpha:
                        ialpha = i
                        break
                if alpha == 'Po218' and float_drift_eff == True:
                    # apply the fitted collection factor to the Po218 prediction
                    model[ialpha] = model[ialpha]*m.values['coll_fac']
                # asymmetric 68.2% Poisson errors on the measured rates
                yerror = numpy.zeros((2,self.inum))
                tmp = scipy.stats.poisson.interval(0.682,self.alpha_cpd[alpha])
                yerror[0,:] = numpy.array(self.alpha_cpd[alpha]) - tmp[0]
                yerror[1,:] = tmp[1] - numpy.array(self.alpha_cpd[alpha])
                plt.errorbar(self.elapsed_time_sec, self.alpha_cpd[alpha], yerr=yerror, fmt='o', label='%s data'%alpha)
                leg.append('%s data'%alpha)
                # model is counts/interval; convert to counts/day for the overlay
                plt.plot(self.elapsed_time_sec, model[ialpha]*3600.*24./self.ilivetime, label='%s model'%alpha)
                leg.append('%s fit'%alpha)
                chi2[alpha] = (self.alpha_counts[alpha] - model[ialpha])**2. / model[ialpha]
            print "Close plot to continue"
            leg = plt.legend(loc=1, borderaxespad=0.)
            leg.get_frame().set_alpha(0.5)
            plt.xlabel("Elapsed Time (sec)")
            plt.ylabel("Counts Per Day")
            # NOTE(review): dict-style m_out['is_valid'] is the old iminuit
            # fmin-struct interface
            if m_out['is_valid'] == False:
                fit_status = 'Failed!!'
            if m_out['is_valid'] == True:
                fit_status = 'succeeded'
            plt.title('NLL FIT (U-chain) Fit %s'%fit_status)
            plt.show()
            print "Calculating Chi^2 of estimate:"
            for k in chi2.keys():
                print '  %s Chi2 = %.2f/(%d-1) = %.2f'%(k,numpy.sum(chi2[k]),len(chi2[k]), numpy.sum(chi2[k])/(len(chi2[k])-1))
            
    def nll_full_U_chain_iminuit(self, Ra226_N0, Rn222_N0, Po218_N0, Pb214_N0, Bi214_N0):
        """
        Extended Poisson NLL (constant log-factorial terms dropped) with the
        full set of floating initial populations; Po214 starts at zero.
        NLL = sum( (model_Po218 + model_Po214) - ( data_Po218*ln(model_Po218) + data_Po214*ln(model_Po214) ))
        """
        initial_pops = [Ra226_N0, Rn222_N0, Po218_N0, Pb214_N0, Bi214_N0, 0]
        model = self.U_chain_rate(initial_pops)
        # rows 2 and 5 of the model are Po218 and Po214 respectively
        po218_model = model[2]
        po214_model = model[5]
        po218_data = self.alpha_counts['Po218']
        po214_data = self.alpha_counts['Po214']
        return numpy.sum((po218_model + po214_model)
                         - (po218_data*numpy.log(po218_model) + po214_data*numpy.log(po214_model)))
        
    def nll_full_U_chain_iminuit_floatdrift(self, Ra226_N0, Rn222_N0, Po218_N0, Pb214_N0, Bi214_N0, coll_fac):
        """
        Same NLL as nll_full_U_chain_iminuit, but the Po218 prediction is
        scaled by a collection-efficiency factor: Po218 has one opportunity to
        collect while Po214 has more.
        NLL = numpy.sum( (model[2]*coll_fac + model[5]) - ( data_Po218*numpy.log(model[2]*coll_fac) + data_Po214*numpy.log(model[5]) ))
        """
        initial_pops = [Ra226_N0, Rn222_N0, Po218_N0, Pb214_N0, Bi214_N0, 0]
        model = self.U_chain_rate(initial_pops)
        po218_model = model[2]*coll_fac  # only Po218 is attenuated
        po214_model = model[5]
        po218_data = self.alpha_counts['Po218']
        po214_data = self.alpha_counts['Po214']
        return numpy.sum((po218_model + po214_model)
                         - (po218_data*numpy.log(po218_model) + po214_data*numpy.log(po214_model)))
    
    def nll_top_U_chain_iminuit(self, Ra226_N0, Rn222_N0):
        """
        Extended Poisson NLL (constant log-factorial terms dropped) for the top
        of the U chain: only Ra226 and Rn222 float; daughters start at zero.
        NLL = sum( (model_Po218 + model_Po214) - ( data_Po218*ln(model_Po218) + data_Po214*ln(model_Po214) ))
        """
        model = self.U_chain_rate([Ra226_N0, Rn222_N0, 0., 0., 0., 0.])
        # rows 2 and 5 of the model are Po218 and Po214 respectively
        po218_model = model[2]
        po214_model = model[5]
        po218_data = self.alpha_counts['Po218']
        po214_data = self.alpha_counts['Po214']
        return numpy.sum((po218_model + po214_model)
                         - (po218_data*numpy.log(po218_model) + po214_data*numpy.log(po214_model)))
        
    def nll_top_U_chain_iminuit_floatdrift(self, Ra226_N0, Rn222_N0, coll_fac):
        """
        Top-of-chain NLL (only Ra226/Rn222 float) with a Po218 collection
        factor: Po218 has one opportunity to collect while Po214 has more.
        NLL =numpy.sum( (model[2]*coll_fac + model[5]) - ( data_Po218*numpy.log(model[2]*coll_fac) + data_Po214*numpy.log(model[5]) ))
        """
        model = self.U_chain_rate([Ra226_N0, Rn222_N0, 0., 0., 0., 0.])
        po218_model = model[2]*coll_fac  # only Po218 is attenuated
        po214_model = model[5]
        po218_data = self.alpha_counts['Po218']
        po214_data = self.alpha_counts['Po214']
        return numpy.sum((po218_model + po214_model)
                         - (po218_data*numpy.log(po218_model) + po214_data*numpy.log(po214_model)))
        
    def fit_U_chain_iminuit_chi2(self):
        """
        This uses iminuit to minimize the Chi2 defined by:
          chi2 = numpy.sum((model[ialpha] - self.alpha_counts[alpha])**2 / (self.alpha_counts[alpha]))
        We get the initial values of Rn and Ra from self.initial_conditions_estimate_U().
        Values are limited such that no value can be negative (but to do this upper limits were also necessary)
        Ra = [0,1e20], remaining limts are=[0,1e9].
        The NLL function is self.nll_U_chain_iminuit(Ra226_N0, Rn222_N0, Po218_N0, Pb214_N0, Po214_N0):
        """
        import iminuit
        N0_initial = self.initial_conditions_estimate_U()
        m = iminuit.Minuit(self.chi2_U_chain_iminuit, Ra226_N0 = N0_initial[0], Rn222_N0 = N0_initial[1], limit_Rn222_N0=(0., 2*N0_initial[1]), limit_Ra226_N0=(0., 1e20), limit_Po218_N0=(0., 1e9), limit_Pb214_N0=(0., 1e9), limit_Bi214_N0=(0., 1e9), print_level=1)
        m.print_param()
        m.migrad()
        print 'parameters', m.parameters
        print 'args', m.args
        print 'value', m.values
        A.fit_result_iminuit_chi2 = m.values
             
    def chi2_U_chain_iminuit(self, Ra226_N0, Rn222_N0, Po218_N0, Pb214_N0, Bi214_N0):
        """
        Returns chi2 used by minimizer in fit_U_chain_iminuit_chi2:
          chi2 = sum over Po214/Po218 bins of (model - data)^2 / data

        BUG FIX: the last parameter was declared Po214_N0 while the body (and
        the limit_Bi214_N0 keyword passed by fit_U_chain_iminuit_chi2) used
        Bi214_N0, which raised NameError. The floating population is Bi214;
        Po214 always starts at zero atoms.
        """
        # Initial populations in chain order [Ra226, Rn222, Po218, Pb214, Bi214, Po214]
        N0 = [Ra226_N0, Rn222_N0, Po218_N0, Pb214_N0, Bi214_N0, 0.]
        model = self.U_chain_rate(N0)
        # Calculate chi2 for data (Uranium chain)
        chi2_tot = 0
        for alpha in ['Po214','Po218']:
            # locate this isotope's row in the model output
            for i,a in enumerate(self.decay_chain_U):
                if a['decay'] == alpha:
                    ialpha = i
                    break
            chi2_tot += numpy.sum((model[ialpha] - self.alpha_counts[alpha])**2 / (self.alpha_counts[alpha]))
        return chi2_tot
        
    def print_mat(self,mat):
        """
        This prints out a given matrix to STDIO in a pretty format.
        mat: a 2-D numpy array (see the NOTEs below on the shape assumptions).
        """
        # NOTE(review): for a true 1-D array of shape (n,) this test is False
        # (n-1 != n); it is True only for 2-D row/column vectors, shape (1,n)
        # or (n,1). So the "1D" branch handles vectors stored as 2-D arrays,
        # and a genuine 1-D input would fall into the 2-D branch and raise.
        # All call sites here pass 2-D arrays (A, V, V^-1, lambdas).
        if sum(mat.shape)-1 == mat.size:
            # 1D matrix
            for i in range(mat.size):
                print '% .3f'%mat[i]
        else:
            # 2D
            # NOTE(review): loop bounds come from columns (mat[0,:]) and rows
            # (mat[:,i]) but elements are indexed mat[i,j] -- correct only for
            # square matrices; a non-square input would print transposed
            # bounds or raise IndexError. Current callers pass square matrices.
            for i in range(len(mat[0,:])):
                s = ''
                for j in range(len(mat[:,i])):
                    s+='% .3e\t'%mat[i,j]
                print s
        print "\n"

    # TODO: Chain constrained...

# --oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo--
#                                                            PLOTS
# --oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo--
def escrun_heat_plot(run,logz=False,accum=False,livetime_corr=True,dpi=100):
    """
    Returns matplotlib.figure object of a channel-vs-time heat plot.
        escrun_heat_plot(run,logz=False,accum=False,livetime_corr=True,dpi=100):
        logz = Scale the color False=linear (default), True=log
        accum = Plot accumulated counts (True) or interval counts (False=default)
        livetime_corr = stretch each spectrum over its realtime in minutes,
                        leaving dead-time rows blank (True=default)
        dpi = figure resolution
    """
    if accum == True:
        data = run.data
    else:
        data = run.idata
    fig = plt.figure(figsize=(15,8), dpi=dpi)
    if livetime_corr == True:
        # One image row per minute of real time x number of channels.
        # BUG FIX: numpy.round returns floats, which are not valid as an array
        # shape or slice index on modern numpy -- cast to int explicitly.
        nrows = int(numpy.sum(numpy.round(run.irealtime/60.)))
        imgM = numpy.zeros((nrows,run.nchan),dtype=numpy.uint16)
        istart = 0
        for i in range(run.inum):
            # fill only the livetime portion; dead-time rows stay zero
            iend = istart + int(numpy.round(run.ilivetime[i]/60.))
            imgM[istart:iend,:] = data[i]
            istart = istart + int(numpy.round(run.irealtime[i]/60.))
        if logz==False:
            cax1 = plt.imshow(imgM, aspect='auto')
        else:
            cax1 = plt.imshow(imgM, aspect='auto', norm=LogNorm(vmin=1,vmax=run.data.max()))
    else: #livetime_corr==False
        # BUG FIX: this branch previously ignored `accum` and always drew
        # run.idata; use the selection made above.
        if logz==False:
            cax1 = plt.imshow(data, aspect='auto')
        else:
            cax1 = plt.imshow(data, aspect='auto', norm=LogNorm(vmin=1,vmax=run.data.max()))

    plt.ylabel('Minutes (since t0)')
    plt.xlabel('Channel')
    pylab.colorbar()
    return fig


# --oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo--
#                                                            ESCDB
# --oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo--
class escdb:
    """Saves information about the ESC energy scale for determining peaks
    (maybe eventually saves information about ESC backgrounds too).
    Structure:
      {'ESC1':{
         'calib':[{'date':<datetime>,'params':(p1,p2,...),'ROI':(xmin,xmax)},...],
         'data_track':[{'date':<datetime>,'line':(b,m,chi2),'quad':(a0,a1,a2,chi2),'res':{'<peak>':(<laurentz>,<gaus>)}},...],
         'detector_changes':[{'date':<datetime>,'abv':"AbbrevNote",'note':"Long description},...],
         'background_c':[{'date':<datetime>,'bkg':<float>},...],
         'background_p':[{'date':<datetime>,'bkg':<float>},...]}
      'ESC2:':{...}}
    """
    #TODO: Perhaps we want a lite version for only getting calibration information...
    def __init__(self, filepath, **kwargs):
        """Load the pickled database stored at filepath. kwargs: verbose=<bool>."""
        self.filepath = filepath
        self.verbose = kwargs.get('verbose', False)
        # Pickle files must be opened in binary mode (required on Python 3,
        # harmless on Python 2); the original used text mode 'r'.
        with open(filepath, 'rb') as f:
            self.escdb = pickle.load(f)
        self.__sort_db()

    def __add_esc(self, key):
        """Create an empty record set for an ESC that is new to the database."""
        self.escdb[key] = {
            'calib': [],
            'data_track': [],
            'detector_changes': [],
            'background_c': [],
            'background_p': []}

    def __sort_db(self):
        """Since we're using lists one can append data out of order, therefore
        every list is re-sorted by date after each add method and prior to save."""
        for esc in self.escdb.keys():
            for data in self.escdb[esc].keys():
                # Now we're at the list level...
                self.escdb[esc][data].sort(key=lambda k: k['date'])

    def __get_esc_key(self, esc_num):
        """Map an ESC number (int or int-like) to its dict key, e.g. 1 -> 'ESC1'."""
        return 'ESC%d' % int(esc_num)

    def savedb(self, filepath=None, backup=True):
        """Saves the db over the old filepath, or at a new location if filepath is specified.
        backup=True copies an existing file aside (timestamped) before overwriting."""
        self.__sort_db()
        fp = self.filepath if filepath is None else filepath
        if os.path.isfile(fp):
            if backup == True:
                # BUG FIX: fp.split('.')[0] truncated at the FIRST dot anywhere
                # in the path (e.g. './data/db.pkl' -> '.'); splitext drops
                # only the extension.
                root = os.path.splitext(fp)[0]
                stamp = datetime.datetime.strftime(datetime.datetime.now(), "%Y%m%d_%H%M%S")
                shutil.copyfile(fp, root + '_' + stamp + '.pkl')
        # BUG FIX: the dump was nested inside the isfile() check, so saving to
        # a path that did not exist yet silently wrote nothing.
        try:
            # binary mode is required for pickle on Python 3
            with open(fp, 'wb') as f:
                pickle.dump(self.escdb, f)
        except Exception:
            print("ESCDB: Could not save escdb")

    def get_esc_cal(self, esc_num, date):
        """Returns the most recent calibration dated at or before `date`, or None."""
        key = self.__get_esc_key(esc_num)
        if key not in self.escdb.keys() or self.escdb[key]['calib'] == []:
            print("ESCDB: No calibration information for %s" % key)
            return None
        # calib is kept date-sorted: walk forward remembering the last entry
        # not after the requested date.
        # BUG FIX: the original returned calib[i] at the break point, which is
        # the first calibration AFTER `date`, not the most recent prior one.
        prior = None
        for cal in self.escdb[key]['calib']:
            if cal['date'] > date:
                break
            prior = cal
        if prior is None:
            print("ESCDB: No calibration date exists before date requested")
            return None
        if self.verbose:
            print("Using current calibration for date %s" % (datetime.datetime.strftime(prior['date'], "%Y%m%d")))
        return prior

    def add_esc_cal(self, esc_num, date, type, params):
        """Append a new calibration entry for the given ESC.
        (The `type` parameter name shadows the builtin but is kept for
        interface compatibility.)"""
        key = self.__get_esc_key(esc_num)
        if key not in self.escdb.keys():
            self.__add_esc(key)
        self.escdb[key]['calib'].append({'date': date, 'type': type, 'params': params})
        self.__sort_db()

    def add_data_track(self, esc_num, date, **kwargs):
        """Append a data-track point for the given ESC. kwargs: line=(b,m,chi2)."""
        key = self.__get_esc_key(esc_num)
        if key not in self.escdb.keys():
            self.__add_esc(key)
        dp = {'date': date}
        if 'line' in kwargs.keys():
            dp['line'] = kwargs['line']
        # BUG FIX: the assembled point was built but never stored in the db.
        self.escdb[key]['data_track'].append(dp)
        self.__sort_db()


    def make_plots(self):
        """Make plots that show the time evolution of the ESCDB entries"""
        pass


# --oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo--
#                                                            READCHN
# --oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo----oooOOO0000OOOoooo--
class readchn:
    """Parse a single Ortec CHN file and expose its header and spectrum.

    Raw file contents may be passed directly as `filestream`, or read from
    disk with the keyword argument `file=<path>`.  After construction the
    header fields (ftype, mca_num, segment, secs, realtime, livetime,
    acqtime, channoffset, nchan) are attributes, the channel counts are the
    numpy integer array `self.data`, and the acquisition start time is the
    datetime `self.runstarttime`.
    """

    def __init__(self, filestream=None, **kwargs):
        self.verbose = False
        if 'verbose' in kwargs.keys():
            self.verbose = kwargs['verbose']
        self.from_file = None
        if 'file' in kwargs.keys():
            self.from_file = kwargs['file']
            # BUGFIX: close the file handle instead of leaking it.
            f = open(kwargs['file'],'rb')
            try:
                filestream = f.read()
            finally:
                f.close()
        self.header,self.data = self.parse_filestream(filestream)
        (self.ftype,self.mca_num,self.segment,self.secs,self.realtime,self.livetime
        ,self.acqtime,self.channoffset,self.nchan)=self.header
        self.convert_numpy()
        if self.verbose: self.Print()

    def parse_filestream(self,filestream):
        """Unpack the CHN header and channel data from a raw byte string.

        Returns (header_tuple, counts_array).  Raises ValueError when the
        file-type word is not the Ortec CHN magic (-1) or the channel count
        is not positive.
        """
        import struct
        from numpy import array
        #the header contains the following: (ftype,mca_num,segment,secs,realtime,livetime,acqtime,channoffset,nchan)
        hdrfmt='h h h 2s i i 12s h h'
        hdrsize=struct.calcsize(hdrfmt)
        hdr=struct.unpack(hdrfmt,filestream[:hdrsize])
        if hdr[0]!=-1: raise ValueError('The file is not recognised as Ortec CHN format')
        if hdr[8]<=0: raise ValueError('Empty Header?')
        datafmt="%ii"%(hdr[8]) # hdr[8] is the number of channels and hdr[7] is the channel offset
        datasize=struct.calcsize(datafmt)
        data=array(struct.unpack(datafmt,filestream[hdrsize:hdrsize+datasize]),dtype='i')
        return hdr,data

    def convert_numpy(self):
        """Convert raw header fields to usable units (seconds, datetime)."""
        self.nchan = int(self.nchan)
        # CHN real/live times are stored in 20 ms ticks -- presumably, given
        # the /50 conversion; TODO confirm against the Ortec CHN spec.
        self.livetime = int(self.livetime)/50.
        self.realtime = int(self.realtime)/50.
        # acqtime is 'DDMonYY?HHMM' (byte 7 is a flag we skip) and secs holds
        # the seconds as two ASCII digits.
        stamp = self.acqtime[:7]+self.acqtime[8:]+self.secs
        if not isinstance(stamp, str):
            stamp = stamp.decode('ascii') # struct yields bytes on Python 3
        self.runstarttime = datetime.datetime.strptime(stamp,'%d%b%y%H%M%S')

    def Print(self):
        """Dump a human-readable summary of the parsed file."""
        print("READCHN ------------------------------------------------------")
        print(" From File?: %s"%(str(self.from_file)))
        # BUGFIX: was self.datetime_istart, an attribute that is never set
        # anywhere -- the start time lives in self.runstarttime.
        print(" Date = %s"%(self.runstarttime.strftime("%Y/%m/%d  %H:%M:%S")))
        # BUGFIX: secs is the raw 2-byte ASCII field, not an int; %d on a
        # string raises TypeError, so convert first.
        print(" Secs=%d, RealTime=%d, LiveTime=%d"%(int(self.secs),self.realtime,self.livetime))
        print("--------------------------------------------------------------")

class UnexpectedShit(Exception):
    """Catch-all exception carrying an arbitrary payload in `value`;
    its string form is the repr of that payload."""

    def __init__(self, value):
        # Keep the payload on the instance; __str__ below reports it.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
