#Copyright (C) Nial Peters 2012
#
#This file is part of AvoScan.
#
#AvoScan is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#AvoScan is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with AvoScan.  If not, see <http://www.gnu.org/licenses/>.
"""
The processing module provides a set of classes and functions for post-processing
scan data. In particular it helps deal with assigning angles to spectrum files.

This module is very much still under construction - please refer to the source code
to see how things work.
"""


import datetime
import calendar
import re
import os
import numpy
import math
import xlrd
import scipy.optimize
from pylab import plot,legend,show
from collections import namedtuple
from std_ops.os_ import find_files
from std_ops.iter_ import array_multi_sort, multi_sort




class AvoScanAngles:
    """
    AvoScanAngles(filename)
    
    Container class for the times and angles data stored in an AvoScan 
    angles file.
    
    * filename - name of AvoScan angles file to be loaded.
    """
    def __init__(self, filename):
        #three parallel lists: capture times as datetime objects, the same
        #times as float seconds since the epoch (for fast interpolation
        #arithmetic), and the corresponding scanner angles
        self.times = []
        self.sec_times = []
        self.angles = []
        
        with open(filename,"r") as ifp:
            self.__load_file(ifp)

    
    def __load_file(self, ifp):
        """
        Parses the angles file object 'ifp'. Expects two whitespace separated
        columns per line: unix timestamp, angle. Lines starting with '#',
        blank lines and lines with fewer than two fields are skipped.
        """
        for line in ifp:
            #skip comment and blank lines
            if line.startswith("#") or line.isspace() or line == "":
                continue
            
            words = line.split()
            
            #skip corrupted lines
            if len(words) < 2:
                continue
            
            timestamp = float(words[0].lstrip().rstrip())
            self.times.append(datetime.datetime.utcfromtimestamp(timestamp))
            self.sec_times.append(date2secs(self.times[-1]))
            
            self.angles.append(float(words[1].lstrip().rstrip()))



    def time2angle(self, t, time_error = 1.0, angle_error=0.0):
        """
        Given a datetime object 't' returns a ValueAndError tuple with the corresponding 
        angle, positive and negative error
        
        ValueAndError(-1, -1) is returned if 't' falls outside the time span
        covered by the angles file.
        
        NOTE(review): ValueAndError is not defined in this module - presumably
        a namedtuple with 'value' and 'error' fields defined/imported elsewhere
        (callers access .value and .error) - confirm.
        """
           
        #convert time to seconds from epoch
        t = date2secs(t)
        
        if t < self.sec_times[0] or t > self.sec_times[-1]:
            return ValueAndError(-1,-1)
        i = 0
        #find the first recorded time >= t; linear scan from the start
        #NOTE(review): if t exactly equals sec_times[0], i stays 0 and the
        #i-1 indices below wrap around to the last element - TODO confirm
        #this case cannot occur in practice
        while self.sec_times[i] < t:
            i+=1

        #linearly interpolate: a = angular rate, b = time offset into interval
        a = (self.angles[i]-self.angles[i-1])/(self.sec_times[i]-self.sec_times[i-1])
        b = t - self.sec_times[i-1]
        
        #calculate the error in the calculated angle        
        err_in_a = math.sqrt(2)*angle_error
        err_in_b = time_error
        
        #standard error propagation for the product a*b (guard against
        #division by zero when either factor is exactly zero)
        if a !=0 and b!=0:
            err_in_a_times_b = math.fabs(a*b) * math.sqrt((err_in_a/a)**2 + (err_in_b/b)**2)
        else:
            err_in_a_times_b = 0.0
        
        err_in_result = math.sqrt(err_in_a_times_b**2 + angle_error**2)
        
        return ValueAndError((a*b)+self.angles[i-1], err_in_result)
  


def load_times_file(filename):
    """
    Reads a times file and returns a dict mapping spectrum filename (basename
    only, no directory components) to its capture time as a datetime object.
    
    Each non-comment line holds a file path (which may contain spaces)
    followed by a unix timestamp. Lines starting with '#' are ignored.
    """
    result = {}
    
    with open(filename, 'r') as handle:
        for record in handle:
            #lines beginning with '#' are comments
            if record.startswith('#'):
                continue
            
            fields = record.split()
            
            #the path might have spaces in it! Everything except the final
            #field belongs to the path (re-joined with no separator)
            full_path = "".join(fields[:-1])
            
            #strip both unix and windows style directory components
            key = os.path.basename(full_path).split('\\')[-1]
            
            #final field is the capture time as seconds since the epoch
            capture_time = datetime.datetime.utcfromtimestamp(float(fields[-1]))
            result[key] = capture_time
    
    return result
    

    

GaussianParameters = namedtuple('GaussianParameters',['amplitude','mean','sigma','y_offset'])

def fit_gaussian(xdata, ydata, amplitude_guess=None, mean_guess=None, sigma_guess=None, y_offset_guess=None, plot_fit=False):
    """
    Fits a gaussian to some data using a least squares fit method. Returns a named tuple
    of best fit parameters (amplitude, mean, sigma, y_offset).
    
    Initial guess values for the fit parameters can be specified as kwargs. Otherwise they
    are estimated from the data.
    
    If plot_fit=True then the fit curve is plotted over the top of the raw data and displayed.
    
    Raises ValueError if xdata/ydata lengths differ or contain fewer than 4
    elements, and RuntimeError if the least squares fit fails to converge.
    """
    
    if len(xdata) != len(ydata):
        raise ValueError("Lengths of xdata and ydata must match")
    
    if len(xdata) < 4:
        raise ValueError("xdata and ydata need to contain at least 4 elements each")
    
    # guess some fit parameters - unless they were specified as kwargs
    if amplitude_guess is None:
        amplitude_guess = max(ydata)
    
    #guess the mean as the average x position weighted by the above-average y values
    if mean_guess is None:
        weights = ydata - numpy.average(ydata)
        weights[numpy.where(weights <0)]=0 
        mean_guess = numpy.average(xdata,weights=weights)
                   
    #use the y value furthest from the peak as a guess of y offset 
    #(bug fix: this previously assigned to a misspelt 'yoffset_guess' local,
    #which made passing y_offset_guess as a kwarg raise NameError below)
    if y_offset_guess is None:
        data_midpoint = (xdata[-1] + xdata[0])/2.0
        if mean_guess > data_midpoint:
            y_offset_guess = ydata[0]        
        else:
            y_offset_guess = ydata[-1]

    #use the square root of the |y|-weighted variance of x as the sigma estimate      
    if sigma_guess is None:      
        variance = numpy.dot(numpy.abs(ydata), (xdata-mean_guess)**2)/numpy.abs(ydata).sum()  # Fast and numerically precise    
        sigma_guess = math.sqrt(variance)
    
    
    #put guess params into an array ready for fitting
    p0 = numpy.array([amplitude_guess, mean_guess, sigma_guess, y_offset_guess])

    #define the gaussian function and associated error function
    #(numpy.exp rather than scipy.exp - the latter was removed from scipy)
    fitfunc = lambda p, x: p[0]*numpy.exp(-(x-p[1])**2/(2.0*p[2]**2)) + p[3]
    errfunc = lambda p, x, y: fitfunc(p,x)-y
   
    # do the fitting
    p1, success = scipy.optimize.leastsq(errfunc, p0, args=(xdata,ydata))

    #leastsq signals success with return codes 1-4
    if success not in (1,2,3,4):
        raise RuntimeError("Could not fit Gaussian to data.")

    if plot_fit:
        plot(xdata, ydata, 'g+', label='data')
        plot( xdata,[fitfunc(p1, i) for i in xdata], 'r-', label='fit')
        legend()
        show()
    
    return GaussianParameters(*p1)


def is_doasis_file(ifp):
    """
    Returns true if the file object ifp is a DOASIS .std file, false otherwise.
    Note: ifp is file OBJECT not a filename!
    """
    #DOASIS .std files always begin with this magic string
    ifp.seek(0)
    first_line = ifp.readline()
    return first_line.startswith("GDBGMNUP")

    

def doasis_file_get_time(ifp):
    """
    Reads the capture date and time from a DOASIS .std file object 'ifp' and
    returns it as a datetime object.
    
    Raises ValueError if either the date or the time cannot be found in the
    file contents.
    """
    #TODO - more efficient way to read the date/time from the file - shouldn't need to search
    # it more than once - smarter re needed!
    
    ifp.seek(0)
    file_contents = ifp.read()
    #the date appears on its own line as mm/dd/yyyy, the time as hh:mm:ss
    #(raw strings so the \d escapes are not interpreted by Python)
    date_re = r"^((?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<year>\d{4,4}))"
    time_re = r"^((?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2}))"
    
    #read the date from the file
    d = re.search(date_re,file_contents,flags=re.MULTILINE)
    if d is None:
        raise ValueError("Failed to read the date from the file")
    d = d.groupdict() 
    
    #read the time from the file
    t = re.search(time_re,file_contents,flags=re.MULTILINE)
    if t is None:
        raise ValueError("Failed to read the time from the file")
    t = t.groupdict() 
        
    #build a datetime object
    return datetime.datetime(int(d["year"]),int(d["month"]),int(d["day"]),int(t["hour"]),int(t["minute"]),int(t["second"]))

    
    
def is_spectrasuite_file(ifp):
    """
    Returns true if the file object ifp is a SpectraSuite data file, false
    otherwise. Note: ifp is a file OBJECT not a filename!
    """
    #SpectraSuite files start with a fixed header line
    ifp.seek(0)
    header = ifp.readline()
    return header.startswith("SpectraSuite Data File")


def spectrasuite_file_get_time(ifp):
    """
    Reads the capture time from a SpectraSuite data file object 'ifp' and
    returns it as a datetime object.
    
    Raises ValueError if the file contains no "Date:" line (previously the
    final line of the file would have been parsed instead, giving a confusing
    error - or a NameError for an empty file).
    """
    ifp.seek(0)
    for line in ifp:
        if line.startswith("Date:"):
            break
    else:
        raise ValueError("Failed to read the time from the file - no 'Date:' line found")
    #e.g. "Date: Thu Jan 05 12:30:01 GMT 2012"
    d = datetime.datetime.strptime(line.rstrip(), "Date: %a %b %d %H:%M:%S %Z %Y")
    return d


def date2secs(d):
    """
    Converts the datetime object 'd' (treated as UTC) to floating point
    seconds since the Unix epoch, preserving microsecond precision.
    """
    whole_seconds = calendar.timegm(d.timetuple())
    fractional = d.microsecond / 1e6
    return whole_seconds + fractional


def produce_angles_list(angles_obj, spec_times, total_capture_times, time_error=1.0, angle_error=0.0):
    """
    Maps each capture time in 'spec_times' to an angle (with error) using
    angles_obj.time2angle, and returns the resulting list.
    
    angle_error defaults to 0.0 because the +/- 1 degree systematic error on
    the angle (slack in the gearbox) is normally of no interest - it is
    constant across a whole scan and so does not affect the integration.
    """
    
    #look up the angle (and its error) for every capture time
    angles = []
    for capture_time in spec_times:
        angles.append(angles_obj.time2angle(capture_time, time_error=time_error, angle_error=angle_error))
    
    #it is unphysical for the errors in the angles to overlap - the spectra were recorded in
    #order and the order is fixed, the stepper motor never runs backwards so the angles must 
    #always increase (or decrease depending on scan direction) between points. The absolute minimum 
    #possible time difference between two points is the integration time multiplied by the number
    #of coadds. This minimum spacing is ignored here - but is enforced by the monte carlo routines
    #of the ica_inspect program.   
    return angles



def get_spectra_names_and_times(folder, recursive=True, spec_type="doasis"):
    """
    Finds all the spectrum files of type 'spec_type' ("doasis" or
    "spectrasuite") in 'folder' and reads their capture times.
    
    Returns a tuple (filenames, times) of parallel lists sorted into
    chronological order. Files whose time cannot be read are skipped.
    
    Raises ValueError for an unknown spec_type.
    """
    #select the glob pattern, format-check function and time reader for
    #the requested spectrum type
    if spec_type == "doasis":
        pattern = "*.std"
        check_func = is_doasis_file
        time_func = doasis_file_get_time
    
    elif spec_type == "spectrasuite":
        pattern = "*.txt"
        check_func = is_spectrasuite_file
        time_func = spectrasuite_file_get_time
    
    else:
        raise ValueError, "Unknown spectra type"
    
        
    files = find_files(folder, pattern=pattern, recursive=recursive, full_paths=True)
    print "Found",len(files),"spectra to process."
    
    times = []
    
    for f in files:
        with open(f,"r") as ifp:
            #only read times from files that really are of the expected format
            if check_func(ifp):
                try:
                    times.append(time_func(ifp))
                except TypeError:
                    #NOTE(review): catching TypeError looks odd here - a
                    #malformed file is more likely to raise ValueError from
                    #the time reader; confirm which exception was intended
                    print "Skipping file "+f
    
    #sort the lists into chronological order
    times, files = multi_sort(times, files)
    
    return files,times
    
    
    
def calc_angles_for_spectra(angle_file, spectra_dir, output_file="/home/nialp/test_file.txt"):
    """
    Assigns angles to all the SpectraSuite spectra found in 'spectra_dir'
    (searched recursively) using the AvoScan angles file 'angle_file', and
    writes a tab separated "filename angle" listing to 'output_file'.
    
    Spectra captured outside the time span of the angles file are written
    with a "-" instead of an angle.
    
    * output_file - results file path; the default preserves the previously
      hard-coded location for backwards compatibility.
    """
    print("Loading angles file")
    
    angles_obj = AvoScanAngles(angle_file)
    
    print("Locating spectra and reading capture times")
    spectra_files, spectra_times = get_spectra_names_and_times(spectra_dir, recursive=True, spec_type="spectrasuite")
    
    print("Calculating angles")
    #bug fix: produce_angles_list requires a total_capture_times argument
    #(currently unused by its implementation) - the previous call omitted it
    #and would have raised TypeError
    angles = produce_angles_list(angles_obj, spectra_times, None)
    
    with open(output_file,"w") as ofp:
        for i in range(len(spectra_files)):
            #time2angle returns ValueAndError tuples; a value < 0 marks times
            #outside the angles file's coverage (previously the tuple itself
            #was compared to -1 and passed to round(), which never matched /
            #would have raised TypeError)
            if i >= len(angles) or angles[i].value < 0:
                ofp.write(spectra_files[i] + "\t\t" + "-\n")
            else:
                ofp.write(spectra_files[i] + "\t\t"+ str(round(angles[i].value,2))+"\n")




def calc_plume_height_from_geometry(scan_pos1, scan_pos2, theta1, theta2):
    """
    Triangulates the altitude of the plume centre from two scanner positions.
    
    * scan_pos = (x,y,z) coordinates of scanner (from GPS point)
    * theta = angle (degrees) to the centre of the plume.
    
    Returns the plume height (same units as the z coordinates).
    """
    
    #work in radians throughout
    t1 = math.radians(theta1)
    t2 = math.radians(theta2)
    
    #component-wise separation of the two scanner positions
    dx = float(scan_pos1[0] - scan_pos2[0])
    dy = float(scan_pos1[1] - scan_pos2[1])
    dz = float(scan_pos1[2] - scan_pos2[2])
    
    #straight line distance between the scanners
    separation = math.sqrt(dx**2 + dy**2 + dz**2)
    
    #angle from horizontal of the straight line between the scanners
    alpha = math.asin(dz / separation)
    
    #sine-rule triangulation, referenced back to scanner 1's altitude
    height_above_scanner = (separation * math.sin(t1) * math.sin(t2 - alpha)) / math.sin(t1 - t2)
    
    return height_above_scanner + scan_pos1[2]



def ExcelRowIter(filename):    
    """
    Generator yielding the rows (lists of cell values) of the first sheet of
    the Excel workbook 'filename'. Requires the xlrd package.
    
    If the workbook contains more than one sheet only the first is used (a
    warning is printed).
    """
    book = xlrd.open_workbook(filename)    
    if book.nsheets > 1:
        print "Warning: File contains more than one sheet - I'm assuming you want the first one."    
    sh = book.sheet_by_index(0)    
    for i in range(sh.nrows):
        yield sh.row_values(i)


def create_angles_for_adv_retrieval(adv_ret_file, corr_data_file, angles_file, output_file, times_file=None):
    """
    Writes a tab separated results file ('output_file') combining the column
    amounts from the advanced retrieval results ('adv_ret_file'), integration
    times and coadds from the correction data file ('corr_data_file'), and
    scan angles interpolated from an AvoScan angles file ('angles_file').
    
    If 'times_file' is given, the spectrum times from the retrieval file are
    replaced with the (more accurate) times it contains; retrieval entries
    with no matching time record are dropped.
    
    NOTE(review): if times_file is None, 'filetimes' below is never assigned
    but is still referenced - presumably this function is always called with
    a times_file; confirm before relying on the default.
    """
    
    filenames, times, col_amounts, errors = load_adv_retrieval_file(adv_ret_file)
    
    
    #load the angles file
    angles_obj = AvoScanAngles(angles_file)
    
    #sanity check
    if angles_obj.times[0].date() != times[0].date():
        print angles_obj.times[0].date(), times[0].date()
        raise ValueError, "Your angles file appears to be from a different date to your spectra"
  
    
    #load the integration times and number of coadds from the correction data file
    int_times = []
    n_coadds = []
    with open(corr_data_file,"r") as ifp:
        for line in ifp:
            if line.isspace() or line == "":
                continue #skip blank lines
            words = line.split()
            if len(words) != 4:
                raise ValueError, "Correction Data file has invalid format. Expecting 4 columns"
            int_times.append(float(words[1])/1000.0) #int_time is in milliseconds in the corr_data file - covert to secs here
            n_coadds.append(int(words[2]))
    
    #sanity check 
    #if len(int_times) != len(times):
    #    raise ValueError, "The data in the columns file doesn't seem to match that in the Correction Data file. Different number of entries."
    
    
    #total time spent capturing each spectrum = integration time * coadds
    total_capture_times = numpy.array(int_times) * numpy.array(n_coadds)
    
    if times_file is not None:
        filetimes = load_times_file(times_file)
        
    if len(filetimes.values()) == len(filenames):
        #then life is easy - the conversion script didn't miss out any spectra
        print "Number of spectra in columns file matches number of spectra in times file. Hooray!"
        times = sorted(filetimes.values())    
        
    elif len(filetimes.values()) < len(filenames):
        print "DogWarden has missed some files"
        #drop every retrieval entry that has no time record; the filename is
        #mapped from .std back to .txt ('__' -> '_' undoes a rename done by
        #the conversion script - presumably; confirm)
        i = 0
        while i < len(filenames):
            if filetimes.has_key(os.path.basename(filenames[i]).replace(".std",".txt").replace('__','_')):
                i += 1
                continue
            else:
                print "No time record for ",os.path.basename(filenames[i]).replace(".std",".txt").replace('__','_')
                filenames = numpy.delete(filenames,[i]) 
                col_amounts= numpy.delete(col_amounts,[i]) 
                errors = numpy.delete(errors,[i]) 
                total_capture_times= numpy.delete(total_capture_times,[i]) 
                i -= 1
            i += 1
        times = sorted(filetimes.values())
        
        assert len(filetimes.values()) == len(filenames)
    
    else:
        #more time records than retrieval entries - no recovery strategy
        print len(filetimes.values()),len(filenames)
        raise ValueError()
        
#        if len(filetimes.values()) == len(filenames):
#            #then life is easy - the conversion script didn't miss out any spectra
#            print "Number of spectra in columns file matches number of spectra in times file. Hooray!"
#            times = sorted(filetimes.values())
#        
#        else:
#            #life is difficult! We have to try and figure out which files the conversion script skipped
#            print "Number of spectra in columns file does not match number of spectra in times file. Please be patient while I try to sort this out."
#            #get the spectra directory from the times file (here we assume that all the spectra
#            #are in one dir with no subdirs)
#            import wx
#            spec_dir = wx.DirSelector("Select spectra directory.", defaultPath=os.path.dirname(angles_file))
#                       
#            #build a mapping between the spectrum numbers of the spectrasuite files and
#            #the std files (remembering that some SS files may have been unreadable)
#            ss_files = []
#            for s in doas.spectra_dir.SpectraDirectory(spec_dir):
#                ss_files.append(os.path.basename(s.filename))
#            
#            
#            if len(ss_files) != len(filenames):
#                #then we have a problem
#                raise RuntimeError("Failed to build the SS filename to STD filename mapping")
#            print "len filenames = d, len times = d, len ss_filenames = d",len(filenames),len(times),len(ss_files)
#            for i in range(len(filenames)):
#
#                try:
#                    times[i] = filetimes[ss_files[i]]
#                except KeyError:
#                    print "No time record for file %s"%ss_files[i]
    
               
    #calculate the angles
    if times_file is None:
        #then the best time resolution we are going to get is 1 second
        angles = produce_angles_list(angles_obj, times, total_capture_times, time_error=1.0)
    else:
        #time error is not more than 10 ms (probably)
        angles = produce_angles_list(angles_obj, times, total_capture_times, time_error=0.01)
    
    #write the output
    with open(output_file,'w') as ofp:
        ofp.write("# Date_Time\t\t\tFilename\t\t\tAngle\t\tAngle Error\tCol. Amount\tCol. Amount Error\tIntergration Time\tNum. Coadds\n")
        i=0
        while i<len(times):
            #skip spectra that were taken when we weren't actually scanning (angles should be -1 for these)
            if angles[i].value <0:
                i+=1
                continue
            ofp.write(times[i].strftime("%d/%m/%Y_%H:%M:%S.%f")+"\t\t"+
                      filenames[i]+"\t\t"+ 
                      str(round(angles[i].value,3))+"\t\t"+ 
                      str(round(angles[i].error,3))+"\t\t"+
                      str(col_amounts[i])+"\t\t"+
                      str(errors[i])+"\t\t"+
                      str(round(int_times[i],3))+"\t\t"+
                      str(n_coadds[i])+"\n")
            
            i+=1


# def create_angles_for_adv_retrieval(adv_ret_xls_file, corr_data_file, angles_file, output_file):
#     
#     filenames=[]
#     times = []
#     col_amounts = []
#     errors = []
#     
#     #read the date from the directory that the columns file is in (the date is not stored
#     #in the file
#     parent_folder = os.path.split(os.path.dirname(os.path.realpath(adv_ret_xls_file)))[-1]
#     
#     #use regular expression to extract date information
#     r = re.match("(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})", parent_folder)
#     if r is None:
#         raise RuntimeError, "Failed to extract the date from the parent folder."
#     
#     date_info = r.groupdict()
#     record_date = datetime.datetime(int(date_info['year']),int(date_info['month']), int(date_info['day']))
#     
#     
#     data_reader = ExcelRowIter(adv_ret_xls_file)
#                     
#     #read the rows up to the one containing the column headings and record
#     #the column headings
#     #skip the first row
#     data_reader.next()
#     i=0
#     missing_files = []
#     for row in data_reader:
#         if row[3].isspace() or row[3] == "":
#             continue
#         times.append(record_date + datetime.timedelta(hours = 24 * row[5]))
#         filenames.append(row[3])
#         if int(row[4]) != i:
#             print "Warning: Spectrum number",i,"is missing from the advanced retrieval results!"
#             missing_files.append(i)
#             i= int(row[4])
#         col_amounts.append(float(row[8]))
#         errors.append(float(row[9]))
#         i+=1    
#     #if the times went passed midnight then we have a problem (because the date will not have changed accordingly)
#     #to fix this we just look for large backwards steps in the times and add an extra day as appropriate
#     i=1
#     while i<len(times):
#         if times[i]<times[i-1]:
#             times[i] += datetime.timedelta(days=1)
#         i+=1
#   
#     #load the angles file
#     angles_obj = AvoScanAngles(angles_file)
#     
#     #sanity check
#     if angles_obj.times[0].date() != record_date.date():
#         print angles_obj.times[0].date(), record_date.date()
#         raise ValueError, "Your angles file appears to be from a different date to your spectra"
#   
#     
#     #load the integration times and number of coadds from the correction data file
#     int_times = []
#     n_coadds = []
#     with open(corr_data_file,"r") as ifp:
#         i=0
#         for line in ifp:
#             if line.isspace() or line == "":
#                 continue #skip blank lines
#             if i in missing_files:
#                 i+=1
#                 continue
#             words = line.split()
#             if len(words) != 4:
#                 raise ValueError, "Correction Data file has invalid format. Expecting 4 columns"
#             int_times.append(float(words[1])/1000.0) #int_time is in milliseconds in the corr_data file - covert to secs here
#             n_coadds.append(int(words[2]))
#             i+=1
#     
#     #sanity check 
#     if len(int_times) != len(times):
#         print len(int_times), len(times)
#         if len(int_times) == len(times)+1:
#             print "Warning: Advanced retrieval script seems to have missed one spectrum."
#         else:
#             raise ValueError, "The data in the advanced retrieval excel file doesn't seem to match that in the Correction Data file. Different number of entries."
#     
#     
#     total_capture_times = numpy.array(int_times) * numpy.array(n_coadds)
#                
#     #calculate the angles
#     angles = produce_angles_list(angles_obj, times, total_capture_times)
#     
#     #write the output
#     with open(output_file,'w') as ofp:
#         ofp.write("# Date_Time\t\t\tFilename\t\t\tAngle\t\tAngle Error\tCol. Amount\tCol. Amount Error\tIntergration Time\tNum. Coadds\n")
#         i=0
#         while i<len(times):
#             #skip spectra that were taken when we weren't actually scanning (angles should be -1 for these)
#             if angles[i].value <0:
#                 i+=1
#                 continue
#             ofp.write(times[i].strftime("%d/%m/%Y_%H:%M:%S.%f")+"\t\t"+
#                       filenames[i]+"\t\t"+ 
#                       str(round(angles[i].value,3))+"\t\t"+ 
#                       str(round(angles[i].error,3))+"\t\t"+
#                       str(col_amounts[i])+"\t\t"+
#                       str(errors[i])+"\t\t"+
#                       str(round(int_times[i],3))+"\t\t"+
#                       str(n_coadds[i])+"\n")
#             
#             i+=1


def load_adv_retrieval_file(adv_ret_xls_file):
    """
    Loads the results of the advanced retrieval from the Excel file
    'adv_ret_xls_file'. Returns a tuple of parallel lists
    (filenames, times, col_amounts, errors).
    
    The capture date is not stored in the file - it is read from the name of
    the file's parent directory, which must begin with YYYYMMDD
    (RuntimeError is raised otherwise). Times that wrap past midnight have a
    day added so the returned list is monotonic.
    """
    filenames=[]
    times = []
    col_amounts = []
    errors = []
    
    #read the date from the directory that the columns file is in (the date is not stored
    #in the file
    parent_folder = os.path.split(os.path.dirname(os.path.realpath(adv_ret_xls_file)))[-1]
    
    #use regular expression to extract date information
    r = re.match("(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})", parent_folder)
    if r is None:
        raise RuntimeError, "Failed to extract the date from the parent folder."
    
    date_info = r.groupdict()
    record_date = datetime.datetime(int(date_info['year']),int(date_info['month']), int(date_info['day']))
    
    
    data_reader = ExcelRowIter(adv_ret_xls_file)
                    
    #read the rows up to the one containing the column headings and record
    #the column headings
    #skip the first row (Python 2 generator protocol - .next())
    data_reader.next()
    i=0
    missing_files = []
    #expected columns: row[3]=filename, row[4]=spectrum number, row[5]=time,
    #row[8]=column amount, row[9]=error
    for row in data_reader:
        if row[3].isspace() or row[3] == "":
            continue
        
        #sometimes the times column is formatted in such a way that it is read as fractional hours since 00:00:00
        #instead of a string
        try:
            times.append(record_date + datetime.timedelta(hours = 24 * float(row[5])))
        except ValueError:
            times.append(datetime.datetime.combine(record_date.date(),datetime.datetime.strptime(row[5],"%H:%M:%S").time()))
        
        filenames.append(row[3])
        #gaps in the spectrum numbering indicate spectra missing from the
        #retrieval results - record them so callers can skip matching entries
        if int(row[4]) != i:
            print "Warning: Spectrum number",i,"is missing from the advanced retrieval results!"
            missing_files.append(i)
            i= int(row[4])
        col_amounts.append(float(row[8]))
        errors.append(float(row[9]))
        i+=1    
    #if the times went passed midnight then we have a problem (because the date will not have changed accordingly)
    #to fix this we just look for large backwards steps in the times and add an extra day as appropriate
    i=1
    while i<len(times):
        if times[i]<times[i-1]:
            times[i] += datetime.timedelta(days=1)
        i+=1
    
    return filenames, times, col_amounts, errors



def load_columns_file(columns_file):
    """
    Loads a DOASIS columns file. Returns a tuple of parallel lists
    (filenames, times, col_amounts, errors).
    
    The capture date is not stored in the file itself - it is read from the
    name of the file's parent directory, which must begin with YYYYMMDD.
    RuntimeError is raised if it does not.
    
    Times that wrap past midnight have a day added to them so that the
    returned times list is monotonic.
    """
    filenames=[]
    times = []
    col_amounts = []
    errors = []
    
    #read the date from the directory that the columns file is in (the date is not stored
    #in the file)
    parent_folder = os.path.split(os.path.dirname(os.path.realpath(columns_file)))[-1]
    
    #use regular expression to extract date information (raw string so \d is
    #not interpreted by Python)
    r = re.match(r"(?P<year>\d{4})(?P<month>\d{2})(?P<day>\d{2})", parent_folder)
    if r is None:
        raise RuntimeError("Failed to extract the date from the parent folder.")
    
    date_info = r.groupdict()
    record_date = datetime.datetime(int(date_info['year']),int(date_info['month']), int(date_info['day']))
    
    with open(columns_file,"r") as ifp:
        for line in ifp:
            #skip blank lines (consistent with the other file loaders in this
            #module - previously a blank line raised IndexError below)
            if line.isspace() or line == "":
                continue
            #expected columns: words[1]=filename, words[3]=HH:MM:SS time,
            #words[4]=column amount, words[5]=error
            words = line.split()            
            filenames.append(words[1])
            times.append(datetime.datetime.combine(record_date, datetime.datetime.strptime(words[3],"%H:%M:%S").time()))
            col_amounts.append(float(words[4]))
            errors.append(float(words[5]))
           
    #if the times went past midnight then we have a problem (because the date will not have changed accordingly)
    #to fix this we just look for large backwards steps in the times and add an extra day as appropriate
    i=1
    while i<len(times):
        if times[i]<times[i-1]:
            times[i] += datetime.timedelta(days=1)
        i+=1
    
    return filenames, times, col_amounts, errors

def create_angles_for_columns_file(columns_file, corr_data_file, angles_file, output_file, times_file=None):
    """
    Writes a tab separated results file ('output_file') that combines the
    column amounts from a DOASIS columns file with scan angles interpolated
    from an AvoScan angles file.
    
    * columns_file   - DOASIS columns file (see load_columns_file)
    * corr_data_file - file holding integration times and coadd counts
    * angles_file    - AvoScan angles file for the same day
    * output_file    - path of the results file to create
    * times_file     - optional file of exact capture times (see
                       load_times_file); if given, the 1 second resolution
                       times from the columns file are replaced with these.
    """
    
    filenames, times, col_amounts, errors = load_columns_file(columns_file)
    
    
    
    #load the angles file
    angles_obj = AvoScanAngles(angles_file)
    
    #sanity check
    if angles_obj.times[0].date() != times[0].date():
        print angles_obj.times[0].date(), times[0].date()
        raise ValueError, "Your angles file appears to be from a different date to your spectra"
  
    
    #load the integration times and number of coadds from the correction data file
    int_times = []
    n_coadds = []
    with open(corr_data_file,"r") as ifp:
        for line in ifp:
            if line.isspace() or line == "":
                continue #skip blank lines
            words = line.split()
            if len(words) != 4:
                raise ValueError, "Correction Data file has invalid format. Expecting 4 columns"
            int_times.append(float(words[1])/1000.0) #int_time is in milliseconds in the corr_data file - covert to secs here
            n_coadds.append(int(words[2]))
    
    #sanity check 
    if len(int_times) != len(times):
        raise ValueError, "The data in the columns file doesn't seem to match that in the Correction Data file. Different number of entries."
    
    
    #build a times->data mapping for the columns file contents
    #(keyed on whole seconds - several spectra can share one key because the
    #columns file only has 1 second time resolution)
    col_file_dict = {}
    for i,t in enumerate(times):
        t = int(date2secs(t))
        if col_file_dict.has_key(t):
            col_file_dict[t].append([filenames[i], times[i], col_amounts[i], errors[i], int_times[i], n_coadds[i]])
        else:
            col_file_dict[t] = [[filenames[i], times[i], col_amounts[i], errors[i], int_times[i], n_coadds[i]]]
    
    #total time spent capturing each spectrum = integration time * coadds
    total_capture_times = numpy.array(int_times) * numpy.array(n_coadds)
    
    if times_file is not None:
        filetimes = load_times_file(times_file)
        
        #build a times to exact times mapping (same whole-second keys as
        #col_file_dict so the two can be matched up)
        time_file_dict = {}
        
        for t in filetimes.values():
            k = int(date2secs(t))
            if time_file_dict.has_key(k):
                time_file_dict[k].append(t)
            else:
                time_file_dict[k] = [t]
        
        #where a second has more exact times than columns-file entries, push
        #the surplus exact times forwards into the next second so the counts
        #line up - presumably compensating for rounding at second boundaries
        for t in sorted(filetimes.values()):
            k = int(date2secs(t))
            try:
                while  len(col_file_dict[k]) < len(time_file_dict[k]):
                    print "pushed time ",t
                    if time_file_dict.has_key(k+1):
                        time_file_dict[k+1]= [time_file_dict[k].pop(-1)] + time_file_dict[k+1]
                    else:
                        time_file_dict[k+1]=[time_file_dict[k].pop(-1)]
            except KeyError:
                pass
            
            
        
        #change the times for those that we have stored
        for k in col_file_dict.keys():
            #reject any column amounts that we don't have a time record for
            if not time_file_dict.has_key(k):
                e = col_file_dict.pop(k)
                print "No time record for spectrum file \'%s\'"%e[0]
                continue
            
            
            if len(col_file_dict[k]) <= len(time_file_dict[k]):
                #it is possible that there are less entries in the columns file than in the times file
                #if bad spectrum files have been deleted. There is then no way to retrieve the correct
                #time for the files (since they will have been renamed) but his should be fairly rare, and
                #does not introduce a huge error - so for now we just ignore it.
                for i in range(len(col_file_dict[k])):
                    col_file_dict[k][i][1] = time_file_dict[k][i]
            
            elif k == int(date2secs(times[0])):
                #we are dealing with the first set of recorded times - so the missing spectra are most likely
                #from the beginning of the list
                while len(col_file_dict[k]) != len(time_file_dict[k]):
                    col_file_dict[k].pop(0)
                for i in range(len(col_file_dict[k])):
                    col_file_dict[k][i][1] = time_file_dict[k][i]
            elif k == int(date2secs(times[-1])):
                #we are dealing with the last set of recorded times - so the missing spectra are most likely
                #from the end of the list
                while len(col_file_dict[k]) != len(time_file_dict[k]):
                    col_file_dict[k].pop(-1)
                for i in range(len(col_file_dict[k])):
                    col_file_dict[k][i][1] = time_file_dict[k][i]
            else:
                #mismatch in the middle of the scan - leave those entries with
                #their original (1 second resolution) times
                print "difference = ",len(col_file_dict[k]) - len(time_file_dict[k])
                continue
            
    
        #rebuild the lists from the dicts
        filenames = []
        times = []
        col_amounts = []
        errors = []
        int_times = []
        n_coadds = []
        
        for v in col_file_dict.values():
            for x in v:
                filenames.append(x[0])
                times.append(x[1])
                col_amounts.append(x[2])
                errors.append(x[3])
                int_times.append(x[4])
                n_coadds.append(x[5])
                
                
        
#        if len(filetimes.values()) == len(filenames):
#            #then life is easy - the conversion script didn't miss out any spectra
#            print "Number of spectra in columns file matches number of spectra in times file. Hooray!"
#            times = sorted(filetimes.values())
#        
#        else:
#            #life is difficult! We have to try and figure out which files the conversion script skipped
#            print "Number of spectra in columns file does not match number of spectra in times file. Please be patient while I try to sort this out."
#            #get the spectra directory from the times file (here we assume that all the spectra
#            #are in one dir with no subdirs)
#            import wx
#            spec_dir = wx.DirSelector("Select spectra directory.", defaultPath=os.path.dirname(angles_file))
#                       
#            #build a mapping between the spectrum numbers of the spectrasuite files and
#            #the std files (remembering that some SS files may have been unreadable)
#            ss_files = []
#            for s in doas.spectra_dir.SpectraDirectory(spec_dir):
#                ss_files.append(os.path.basename(s.filename))
#            
#            
#            if len(ss_files) != len(filenames):
#                #then we have a problem
#                raise RuntimeError("Failed to build the SS filename to STD filename mapping")
#            print "len filenames = d, len times = d, len ss_filenames = d",len(filenames),len(times),len(ss_files)
#            for i in range(len(filenames)):
#
#                try:
#                    times[i] = filetimes[ss_files[i]]
#                except KeyError:
#                    print "No time record for file %s"%ss_files[i]
    
               
    #calculate the angles
    if times_file is None:
        #then the best time resolution we are going to get is 1 second
        angles = produce_angles_list(angles_obj, times, total_capture_times, time_error=1.0)
    else:
        #time error is not more than 10 ms (probably)
        angles = produce_angles_list(angles_obj, times, total_capture_times, time_error=0.01)
    
    #write the output
    with open(output_file,'w') as ofp:
        ofp.write("# Date_Time\t\t\tFilename\t\t\tAngle\t\tAngle Error\tCol. Amount\tCol. Amount Error\tIntergration Time\tNum. Coadds\n")
        i=0
        while i<len(times):
            #skip spectra that were taken when we weren't actually scanning (angles should be -1 for these)
            if angles[i].value <0:
                i+=1
                continue
            ofp.write(times[i].strftime("%d/%m/%Y_%H:%M:%S.%f")+"\t\t"+
                      filenames[i]+"\t\t"+ 
                      str(round(angles[i].value,3))+"\t\t"+ 
                      str(round(angles[i].error,3))+"\t\t"+
                      str(col_amounts[i])+"\t\t"+
                      str(errors[i])+"\t\t"+
                      str(round(int_times[i],3))+"\t\t"+
                      str(n_coadds[i])+"\n")
            
            i+=1
#def create_angles_for_columns_file(columns_file, corr_data_file, angles_file, output_file, times_file=None):
#    
#    filenames, times, col_amounts, errors = load_columns_file(columns_file)
#    
#    #load the angles file
#    angles_obj = AvoScanAngles(angles_file)
#    
#    #sanity check
#    if angles_obj.times[0].date() != times[0].date():
#        print angles_obj.times[0].date(), times[0].date()
#        raise ValueError, "Your angles file appears to be from a different date to your spectra"
#  
#    
#    #load the integration times and number of coadds from the correction data file
#    int_times = []
#    n_coadds = []
#    with open(corr_data_file,"r") as ifp:
#        for line in ifp:
#            if line.isspace() or line == "":
#                continue #skip blank lines
#            words = line.split()
#            if len(words) != 4:
#                raise ValueError, "Correction Data file has invalid format. Expecting 4 columns"
#            int_times.append(float(words[1])/1000.0) #int_time is in milliseconds in the corr_data file - convert to secs here
#            n_coadds.append(int(words[2]))
#    
#    #sanity check 
#    if len(int_times) != len(times):
#        raise ValueError, "The data in the columns file doesn't seem to match that in the Correction Data file. Different number of entries."
#    
#    
#    total_capture_times = numpy.array(int_times) * numpy.array(n_coadds)
#    
#    if times_file is not None:
#        filetimes = load_times_file(times_file)
#        
#        if len(filetimes.values()) == len(filenames):
#            #then life is easy - the conversion script didn't miss out any spectra
#            print "Number of spectra in columns file matches number of spectra in times file. Hooray!"
#            times = sorted(filetimes.values())
#        
#        else:
#            #life is difficult! We have to try and figure out which files the conversion script skipped
#            print "Number of spectra in columns file does not match number of spectra in times file. Please be patient while I try to sort this out."
#            #get the spectra directory from the times file (here we assume that all the spectra
#            #are in one dir with no subdirs)
#            import wx
#            spec_dir = wx.DirSelector("Select spectra directory.", defaultPath=os.path.dirname(angles_file))
#                       
#            #build a mapping between the spectrum numbers of the spectrasuite files and
#            #the std files (remembering that some SS files may have been unreadable)
#            ss_files = []
#            for s in doas.spectra_dir.SpectraDirectory(spec_dir):
#                ss_files.append(os.path.basename(s.filename))
#            
#            
#            if len(ss_files) != len(filenames):
#                #then we have a problem
#                raise RuntimeError("Failed to build the SS filename to STD filename mapping")
#            print "len filenames = d, len times = d, len ss_filenames = d",len(filenames),len(times),len(ss_files)
#            for i in range(len(filenames)):
#
#                try:
#                    times[i] = filetimes[ss_files[i]]
#                except KeyError:
#                    print "No time record for file %s"%ss_files[i]
#    
#               
#    #calculate the angles
#    if times_file is None:
#        #then the best time resolution we are going to get is 1 second
#        angles = produce_angles_list(angles_obj, times, total_capture_times, time_error=1.0)
#    else:
#        #time error is not more than 10 ms (probably)
#        angles = produce_angles_list(angles_obj, times, total_capture_times, time_error=0.01)
#    
#    #write the output
#    with open(output_file,'w') as ofp:
#        ofp.write("# Date_Time\t\t\tFilename\t\t\tAngle\t\tAngle Error\tCol. Amount\tCol. Amount Error\tIntergration Time\tNum. Coadds\n")
#        i=0
#        while i<len(times):
#            #skip spectra that were taken when we weren't actually scanning (angles should be -1 for these)
#            if angles[i].value <0:
#                i+=1
#                continue
#            ofp.write(times[i].strftime("%d/%m/%Y_%H:%M:%S.%f")+"\t\t"+
#                      filenames[i]+"\t\t"+ 
#                      str(round(angles[i].value,3))+"\t\t"+ 
#                      str(round(angles[i].error,3))+"\t\t"+
#                      str(col_amounts[i])+"\t\t"+
#                      str(errors[i])+"\t\t"+
#                      str(round(int_times[i],3))+"\t\t"+
#                      str(n_coadds[i])+"\n")
#            
#            i+=1


#define a container class for values and their associated error    
ValueAndError = namedtuple('ValueAndError', ['value', 'error'])

def calc_d(scan_pos1, scan_pos2, delta_pos1, delta_pos2):
    """
    Returns the straight-line distance between two scanner positions as a
    ValueAndError tuple, propagating the positional uncertainties in
    quadrature.
    
    * scan_pos1, scan_pos2 - (x, y, z) position of each scanner.
    * delta_pos1, delta_pos2 - (dx, dy, dz) uncertainty on each coordinate.
    
    Raises ZeroDivisionError if the two positions are identical (the error
    propagation formula divides by the distance).
    """
    #component-wise separation between the two scanner positions
    separations = [float(c1 - c2) for c1, c2 in zip(scan_pos1, scan_pos2)]
    
    #combine the coordinate uncertainties of the two positions in quadrature
    sep_errors = [math.sqrt(e1**2 + e2**2)
                  for e1, e2 in zip(delta_pos1, delta_pos2)]
    
    #straight line distance between the scanners
    dist = math.sqrt(sum(c**2 for c in separations))
    
    #standard error propagation through the distance formula
    dist_error = math.sqrt(sum(((c * e) / dist)**2
                               for c, e in zip(separations, sep_errors)))
    
    return ValueAndError(dist, dist_error)



def split_by_scan(angles, *vars):
    """
    returns an iterator that will split lists/arrays of data by scan (i.e. between start and end angle)
    an arbitrary number of lists of data can be passed in - the iterator will return a list of arrays
    of length len(vars) + 1 with the split angles array at index one, and the remaining data lists
    in order afterwards. The lists will be sorted into ascending angle order.
    
    
    >>> angles = numpy.array([30, 35, 40, 35, 30, 35, 40])
    >>> for a in split_by_scan(angles):
            print a[0]
    [30, 35]
    [30, 35, 40]
    [35, 40]
    >>> for a in split_by_scan(angles, numpy.array([1,2,3,4,5,6,7])):
            print a[1]
    [1, 2]
    [5, 4, 3]
    [6, 7]
    
    
    """
    
    #everything breaks if there are more than two equal angles in a row.
    #(compute the repeat mask once, rather than once for the test and again
    #for locating the offending index)
    repeat_mask = numpy.logical_and((angles[1:] == angles[:-1])[:-1],
                                    angles[2:] == angles[:-2])
    if numpy.any(repeat_mask):
        idx = numpy.argmax(repeat_mask)
        raise ValueError("Data at line "+str(idx+2)+" contains three or more repeated angle entries (in a row). Don't know how to split this into scans.")
        
    anglegradient= numpy.gradient(angles)
    
    #if there are repeated start or end angles, then you end up with zeros in the gradients.
    #possible zeros at the start need to be dealt with separately, otherwise you end up with
    #the first point being put in a scan of its own.
    if anglegradient[0] == 0:
        anglegradient[0] = anglegradient[1]
        
    if anglegradient[-1] == 0:
        anglegradient[-1] = anglegradient[-2]
    
    #a scan boundary is wherever the gradient changes sign. Compare the
    #sign mask against a copy of itself shifted by one element (with the
    #first element flipped so that index 0 is always a boundary).
    firstarray = anglegradient > 0
    secondarray = numpy.copy(firstarray)
    secondarray[1:] = secondarray[0:-1]
    secondarray[0] = not secondarray[0]
    inflectionpoints = numpy.where(firstarray != secondarray)[0]

    #yield each scan between consecutive inflection points
    for i in range(len(inflectionpoints)-1):
        d = [angles[inflectionpoints[i] : inflectionpoints[i+1]]]
        for l in vars:
            d.append(l[inflectionpoints[i] : inflectionpoints[i+1]])
        yield array_multi_sort(*tuple(d))
    
    #the final point is not an inflection point so now we need to return the final scan.
    #Index with [-1] rather than the loop variable: if the data contains only a single
    #monotonic scan there is just one inflection point, the loop above never runs, and
    #using 'i' here would raise NameError.
    d = [angles[inflectionpoints[-1]:]]
    for l in vars:
        d.append(l[inflectionpoints[-1]:])
    yield array_multi_sort(*tuple(d))



   

        