##PBAR_Zspec.py
#
#
# Package for dealing with Zspec data
# 9/24/2013, JK, change CreateGainCorrectionFile so it's more in line with what we have now.

import csv, pickle, copy, cPickle, time, os, codecs, gc
import numpy as np
import numpy.matlib
#import datetime
import matplotlib.pyplot as plt
from sklearn.lda import LDA
from matplotlib.patches import Rectangle
from xml.dom.minidom import parseString

from PIL import Image
import PBAR_Cargo
try:
    import mlpy
except:
    print("Could not import mlpy")

# BASIC STUFF
# I/O
def ReadZspec(fullfilenameList):
    """Read one or more Zspec CSV files into a list of uint32 arrays.

    Accepts either a single filename or a list of filenames.  Label
    rows/columns, when present, are stripped: 257 rows -> drop the
    header row; 138 columns -> drop the leading index column and the
    trailing (empty) column; 137 columns -> drop the leading column.
    """
    # normalize a bare filename into a one-element list
    if isinstance(fullfilenameList, str):
        fullfilenameList = [fullfilenameList]

    dat = []
    for fullfilename in fullfilenameList:
        arr = np.genfromtxt(fullfilename,
                            delimiter=',',
                            skip_header=0,
                            skip_footer=0,
                            dtype='uint32')
        if arr.shape[0] == 257:  # table label row present
            arr = arr[1:, :]
        if arr.shape[1] == 138:  # leading label column + trailing column
            arr = arr[:, 1:-1]
        if arr.shape[1] == 137:  # leading label column only
            arr = arr[:, 1:]
        dat.append(arr)
    return dat

def GetDatasetInformation(infoFilename, filenameList):
    """Read dataset information file generated by PBAR_.

    The info file is tab-delimited with columns: dataset name, time
    stamp string, acquisition time, epoch start time, header text.
    For each entry in filenameList the matching row is looked up and
    its fields collected.

    Returns (datasetDescription, datasetAcquisitionTime, datasetTime,
    datasetTimeNum, datasetTimeStr); datasetTime is a plain list of
    time.struct_time, the rest are numpy arrays.
    """
    names = []
    stamps = []
    acqTimes = []
    epochTimes = []
    headers = []
    with open(infoFilename, 'r') as fid:
        for row in csv.reader(fid, delimiter='\t'):
            names.append(row[0])
            stamps.append(row[1])
            acqTimes.append(row[2])
            epochTimes.append(row[3])
            headers.append(row[4])

    names = np.array(names)
    stamps = np.array(stamps)
    headers = np.array(headers)
    acqTimes = np.array(acqTimes).astype(float)
    epochTimes = np.array(epochTimes).astype(float)

    # pick out the rows matching the requested datasets, in request order
    datasetDescription = []
    datasetTimeStr = []
    datasetTime = []
    datasetTimeNum = []
    datasetAcquisitionTime = []
    for fname in filenameList:
        idx = np.where(names == fname)[0][0]
        datasetDescription.append(headers[idx])
        datasetTimeStr.append(stamps[idx])
        datasetTimeNum.append(epochTimes[idx])
        datasetTime.append(time.localtime(epochTimes[idx]))
        datasetAcquisitionTime.append(acqTimes[idx])

    return (np.array(datasetDescription),
            np.array(datasetAcquisitionTime),
            datasetTime,
            np.array(datasetTimeNum),
            np.array(datasetTimeStr))
    
def ConvertZspecBasicScanToNumpy(filePath, prefixList, sliceList):
    """Read basic-scan zspec CSV files and return (energy, dat).

    BUG FIX: the original body read the scan and silently discarded the
    result (no return statement).  NOTE(review): the name suggests this
    was meant to also write .npy files -- that step appears never to
    have been implemented; confirm intent before relying on this.
    """
    (energy, dat) = ReadZspecBasicScan(filePath, prefixList, sliceList)
    return (energy, dat)
    
def ReadZspecBasicScan(filePath, prefixList, sliceList):
    """Reads Basic scan zspec files.

    For each prefix in prefixList, reads the CSV files
    '<prefix>-slice<N>.csv' for every N in the matching sliceList entry.
    Each file has three header lines followed by 32 rows, one per energy
    bin: the bin energy, 136 detector counts, and a trailing field.

    Returns (energy, dat): per-prefix lists of the 32-entry energy axis
    and a (slices x 136 detectors x 32 bins) count cube.

    Fixes: files are now closed via `with` (they previously leaked),
    the py2-only `reader.next()` is replaced by the builtin `next()`,
    and the loop variable no longer shadows the builtin `slice`.
    """
    #2888-FDFC
    dat = []
    energy = []
    NumberDetectors = 136
    NumberEnergyBins = 32
    for index, prefix in enumerate(prefixList):  # cycle through datasets
        dat2 = np.zeros((len(sliceList[index]), NumberDetectors, NumberEnergyBins))
        energy2 = np.zeros(NumberEnergyBins)
        for sliceIndex, sliceNum in enumerate(sliceList[index]):
            fullFilename = os.path.join(filePath, prefix + '-slice{}.csv'.format(sliceNum))
            with open(fullFilename, 'r') as fid:
                csvReaderObj = csv.reader(fid)
                # skip the three header lines
                for _ in range(3):
                    next(csvReaderObj)
                for i in range(NumberEnergyBins):  # cycle over energy bins
                    lineIn = next(csvReaderObj)
                    energy2[i] = float(lineIn[0])
                    # drop the leading energy field and the trailing field
                    dat2[sliceIndex, :, i] = np.array(lineIn[1:-1]).astype(float)
        dat.append(dat2)
        energy.append(energy2)
    return (energy, dat)

def ReadZspecBasicScanNumpy(fullFilename):
    """Load a basic-scan zspec cube from a .npy file.

    Returns (energy, dat): the fixed 32-bin energy axis and the loaded
    cube, reoriented to (slice, detector, energy-bin) if it was stored
    with the last two axes swapped.
    """
    energy = np.array([
        7.5, 10.5, 13.5, 16.5, 19.5, 22.5, 25.5, 28.5,
        31.5, 34.5, 37.5, 40.5, 43.5, 46.5, 49.5, 52.5,
        55.5, 58.5, 61.5, 64.5, 67.5, 70.5, 73.5, 76.5,
        79.5, 82.5, 85.5, 88.5, 91.5, 94.5, 97.5, 101.0])
    dat = np.load(fullFilename)
    if dat.shape[2] > dat.shape[1]:
        dat = dat.transpose([0, 2, 1])
    return (energy, dat)

def ReadZspecBasicScanPickle(fullFilename):
    """Reads Basic scan zspec files that are pickle files.

        Returns (energy, dat): the fixed 32-bin energy axis and the
        unpickled payload exactly as stored.
        4/9/2014
    """
    energy = np.array([
        7.5, 10.5, 13.5, 16.5, 19.5, 22.5, 25.5, 28.5,
        31.5, 34.5, 37.5, 40.5, 43.5, 46.5, 49.5, 52.5,
        55.5, 58.5, 61.5, 64.5, 67.5, 70.5, 73.5, 76.5,
        79.5, 82.5, 85.5, 88.5, 91.5, 94.5, 97.5, 101.0])
    with open(fullFilename, 'rb') as fid:
        dat = pickle.load(fid)
    return (energy, dat)


def ZspecDetectorLists():
    """Good/bad detector bookkeeping for the 136-channel zspec array.

    Returns (goodMask, badMask, goodNames, badNames, goodIndices,
    badIndices): boolean masks over all 136 channels, detector name
    arrays ('Z1'..'Z136'), and the corresponding 0-based index arrays.
    """
    nDetectors = 136
    # detector names Z1 .. Z136
    allNames = np.array(['Z%d' % n for n in range(1, nDetectors + 1)])
    # bad detector numbers (1-based)
    badZspecList = np.array([1, 2, 3, 4, 5, 6, 7, 8, 20, 26, 31, 33, 34,
                             38, 39, 40, 44, 53, 56, 62, 68, 76, 80, 125,
                             126, 127, 128, 129, 130, 131, 132, 133, 134,
                             135, 136])
    # everything not listed as bad is good
    goodZspecList = np.array([n for n in range(1, nDetectors + 1)
                              if n not in badZspecList])

    badZspecIndices = badZspecList - 1
    goodZspecIndices = goodZspecList - 1

    badZspecNameList = allNames[badZspecIndices]
    goodZspecNameList = allNames[goodZspecIndices]

    # boolean masks over all channels
    badZspecMask = np.zeros(nDetectors, dtype=bool)
    badZspecMask[badZspecIndices] = True
    goodZspecMask = np.zeros(nDetectors, dtype=bool)
    goodZspecMask[goodZspecIndices] = True

    return (goodZspecMask, badZspecMask, goodZspecNameList,
            badZspecNameList, goodZspecIndices, badZspecIndices)


def ZspecDetectorLists2ndSet():
    """
        Lists of good/bad detectors. Second set.
        4/2/2014

        Returns (goodMask, badMask, goodNames, badNames, goodIndices,
        badIndices) over the 136 zspec channels.

        BUG FIX: detector 123 was listed twice in the bad-detector
        table, which produced duplicate entries in badZspecNameList and
        badZspecIndices; the duplicate has been removed.
    """
    # list of zpsec detector names - Z1 to Z136
    ZspecNameList = np.array(['Z%d'%i for i in np.arange(1, 137)])
    # list of bad detector numbers starting at 1 (123 de-duplicated)
    badZspecList = np.array([1,2,3,4,5,6,7,8,19,20,26,62,68,76,80,121,122,123,124,\
        125,126,127,128,129,130,131,132,133,134,135,136])
    # list of good detector numbers starting at 1
    goodZspecList = np.array([i for i in np.arange(1, 137) if (i not in badZspecList)])

    badZspecIndices = badZspecList - 1
    goodZspecIndices = goodZspecList - 1

    # detector names for each group
    badZspecNameList = ZspecNameList[badZspecIndices]
    goodZspecNameList = ZspecNameList[goodZspecIndices]

    # boolean masks over all 136 channels
    badZspecMask = np.zeros(len(ZspecNameList), dtype=bool)
    badZspecMask[badZspecIndices] = True
    goodZspecMask = np.zeros(len(ZspecNameList), dtype=bool)
    goodZspecMask[goodZspecIndices] = True

    return(goodZspecMask, badZspecMask, goodZspecNameList, badZspecNameList, \
        goodZspecIndices, badZspecIndices)

# DISCRIMINATION

def GetZspecDiscrimValues():
    """Hard-coded zspec discriminant constants.

    Returns (wBest, pfitmean, pfitsigma): the linear-combination
    weights and the polynomial coefficients for the count-dependent
    mean and sigma corrections.
    """
    wBest = np.array([-2.07779891, 1.88095226, 4.65429156])
    pfitmean = np.array([-3.45931698e-06, 1.80445624e-03,
                         -3.46939502e-01, 9.03548991e-01])
    pfitsigma = np.array([-2.90915823e-07, 2.71344856e-04,
                          -6.81517865e-02, 6.54989380e+00])
    return (wBest, pfitmean, pfitsigma)

def GetZspecDiscrimValuesVer2():
    """Second-generation zspec discriminant constants.

    Returns (xbar_, scalings_, pfitmean, pfitsigma): the LDA centering
    values and scalings, plus the polynomial coefficients for the
    count-dependent mean (order 22) and sigma (order 13) corrections.
    The values were originally fitted and loaded from
    'zspec2Set.dat'; they are frozen here as literals.
    """
    xbar_ = np.array([1.28294017, 23.0544693])
    scalings_ = np.array([[13.10009414], [-0.03063372]])

    pfitmean = np.array([
         1.91978614e-48, -6.41688054e-45,  9.20031283e-42,
        -7.10780658e-39,  2.83860389e-36, -1.71724621e-34,
        -3.85636964e-31,  1.62228140e-28,  4.81822608e-27,
        -2.87529841e-23,  1.31216003e-20, -3.24277293e-18,
         4.67198510e-16, -2.63356063e-14, -4.10024594e-12,
         1.17126640e-09, -1.42777555e-07,  1.05849453e-05,
        -4.96436387e-04,  1.39998031e-02, -1.98851308e-01,
         4.92225291e-01,  1.33709283e+01])

    pfitsigma = np.array([
         5.30942455e-28, -1.50903534e-24,  1.90735686e-21,
        -1.41293310e-18,  6.80535247e-16, -2.23485118e-13,
         5.10319791e-11, -8.11199455e-09,  8.84250016e-07,
        -6.39214905e-05,  2.88555086e-03, -7.29501758e-02,
         8.12128936e-01, -1.40840513e-01])

    return (xbar_, scalings_, pfitmean, pfitsigma)

def ZspecBasicReduceSize(datZspecStandardWidth, energy, newWidth):
    """
        Reduces the size of the zspec image to specified width.
        Each output slice is the average of the nearest input slices
        around the corresponding resampled position.
        Returns the new (newWidth x detectors x bins) image.
        4/9/2014, removed calculation of features.

        Fixes: neighbor count and center bin are now explicit ints
        (round() returns a float, which breaks numpy slicing), the
        py2-only print statements use the function form, the builtin
        name `bin` is no longer shadowed, and lowerBound casing is
        consistent.
        NOTE(review): for half-integer (pixelSize-1)/2 values the
        rounding direction differs between py2 round() and py3
        banker's rounding -- confirm pixel sizes in use avoid that case.
    """
    imageWidth = int(datZspecStandardWidth.shape[0])
    pixelSize = float(imageWidth)/float(newWidth)
    print('Pixel size %d' %pixelSize)
    # number of neighbors taken on each side of the center bin
    nearestNeighbors = int(round((pixelSize-1.0)/2.0))
    print(nearestNeighbors)

    # resampled positions on the original slice axis
    xArray = np.arange(0, imageWidth, pixelSize)
    xArray0 = np.arange(datZspecStandardWidth.shape[0])
    print(len(xArray))
    datZspecSmall = np.zeros((newWidth, datZspecStandardWidth.shape[1], datZspecStandardWidth.shape[2]))
    for i in range(len(xArray)):
        centerBin = int(np.argmin(abs(xArray0 - xArray[i])))
        # clamp the window to the image so indices never go out of range
        lowerBound = max(centerBin - nearestNeighbors, 0)
        upperBound = min(centerBin + nearestNeighbors, datZspecStandardWidth.shape[0]-1)
        datZspecSmall[i,:,:] = datZspecStandardWidth[lowerBound:(upperBound+1),:,:].sum(0) / (upperBound - lowerBound + 1)

    return(datZspecSmall)


def CargoReduceSize(datCargoStandardWidth, newHeight, newWidth):
    """Resample a 2-D cargo image to a reduced size with PIL.

    NOTE(review): PIL's resize takes a (width, height) tuple, but this
    passes (newHeight, newWidth) -- confirm the intended axis order
    against the callers (which pass (newHeight, newWidth) in that
    order).
    NOTE(review): Image.ANTIALIAS was removed in recent Pillow
    releases; Image.LANCZOS is the equivalent there.
    """
    # Make reduced size cargo image
    temp = Image.fromarray(datCargoStandardWidth)
    temp = temp.resize((newHeight,newWidth),Image.ANTIALIAS)
    return(np.array(temp))

def CalculateFeaturesZspecBasic(d0, energy, lowerBinThreshold = 1):
    """Calculate zspec basic scan features.

        Parameters:
            d0                -- (slices x detectors x bins) count cube
            energy            -- energy value of each bin centre
            lowerBinThreshold -- energy below which bins are zeroed for
                                 the thresholded features

        Returns a dict of (slices x detectors) feature images: raw and
        thresholded counts, spectral moments (mean, STD, skewness,
        kurtosis), low/high energy split ratios, quantiles and the
        linear discriminant ('dist0' / 'discrim').

        4/23/2014, JK, Added additional moments, kurtosis and skewness
        Fix: the cumulative-sum normalization previously hard-coded 32
        energy bins; it now follows d.shape[2], matching
        CalculateFeaturesZspecBasicInterp.
    """
    # convert the energy threshold into a bin index, then zero those bins
    lowerBinThreshold = np.argmin( np.abs( energy - lowerBinThreshold ))
    d = copy.copy(d0)
    d[:,:,:lowerBinThreshold] = 0.0

    temp = {}

    temp['count'] = d0.sum(2).astype(float)
    temp['count2'] = d.sum(2).astype(float)  # counts above the threshold only

    # 3d energy matrix (same size as d); energy runs along the 3rd dimension
    energyMatrix = np.tile(energy, (d.shape[0], d.shape[1], 1))
    temp['binMean'] = (d * energyMatrix).sum(2) / d.sum(2)

    binMeanMatrix = np.tile(temp['binMean'], (d.shape[2],1,1)).transpose((1, 2, 0))
    temp['binSTD'] = np.sqrt( (( (energyMatrix - binMeanMatrix)**2) * d.astype(float) ).sum(2) / d.sum(2).astype(float)  )

    temp['binSTD_binMean'] = temp['binSTD'] / temp['binMean']

    # higher central moments
    temp['binMoment4'] = ( ((energyMatrix - binMeanMatrix)**4) * d.astype(float) ).sum(axis = 2) / d.sum(axis = 2).astype(float)
    temp['binMoment3'] = ( ((energyMatrix - binMeanMatrix)**3) * d.astype(float) ).sum(axis = 2) / d.sum(axis = 2).astype(float)

    # skewness and kurtosis
    # these should be gain invariant, as both numerator and denominator are linear with gain
    temp['binSkew'] = temp['binMoment3'] / temp['binSTD'] **3.0
    temp['binKur'] = temp['binMoment4'] / temp['binSTD'] **4.0

    # bins closest to energies 10 and 20
    highlowIndex10 = np.argmin( abs(energy - 10) )
    highlowIndex20 = np.argmin( abs(energy - 20) )

    temp['multibin_0_10'] = d[:,:,0:highlowIndex10].sum(axis=2)  # bins below ~10
    temp['multibin_10_256'] = d[:,:,highlowIndex10:].sum(axis=2)
    temp['multibin_0_20'] = d[:,:,0:highlowIndex20].sum(axis=2)
    temp['multibin_20_256'] = d[:,:,highlowIndex20:].sum(axis=2)

    temp['multibin_10_ratio'] = temp['multibin_0_10'].astype(float) / temp['multibin_10_256'].astype(float)
    temp['multibin_20_ratio'] = temp['multibin_0_20'].astype(float) / temp['multibin_20_256'].astype(float)

    # QUANTILES
    datTempCumSum = d.cumsum(axis = 2).astype(float) # cumulative sum
    # normalize by the per-pixel total (was hard-coded to 32 bins)
    datTempCumSum = datTempCumSum / np.tile(datTempCumSum[:,:,-1], (d.shape[2], 1, 1)).transpose((1, 2, 0))
    qIntervals = [10., 25., 50., 75., 90.]
    for j in range(len(qIntervals)):
        statName = 'q_%02d' % qIntervals[j]
        temp[statName] = energy[np.argmin(abs(datTempCumSum - (qIntervals[j]/100)), axis = 2)]
    temp['q_range'] = temp['q_75']- temp['q_25']
    temp['q_range_ratio'] = temp['q_range'] / temp['q_50']
    temp['binMean_q50'] = temp['binMean'] - temp['q_50']

    # linear discriminant, 4/28/2014 version
    (xbar_, scalings_, pfitmean, pfitsigma) = GetZspecDiscrimValuesVer2()

    temp['dist0'] = (temp['binSkew'] - xbar_[0]) * scalings_[0] + (temp['binMean'] - xbar_[1]) * scalings_[1]

    # remove the count-dependent offset
    temp['discrim'] = temp['dist0'] - np.polyval(pfitmean, temp['count2'])

    return(temp)

def CalculateFeaturesZspecBasicInterp(dInput, energyInput, lowerBinThreshold = 1):
    """Calculate zspec basic scan features on an interpolated energy grid.

        Each pixel's spectrum is linearly interpolated onto a 1-unit
        energy grid (rescaled to preserve its total counts) before the
        features are computed; the feature definitions otherwise match
        CalculateFeaturesZspecBasic.

        Parameters:
            dInput            -- (slices x detectors x bins) count cube
            energyInput       -- energy value of each input bin centre
            lowerBinThreshold -- energy below which bins are zeroed for
                                 the thresholded features

        4/23/2014, JK, Added additional moments, kurtosis and skewness
        5/1/2014, JK, interpolation
        Perf fix: zeroing the lower bins is now a single vectorized
        assignment instead of a per-pixel double loop (same result).
    """
    # new spectrum bins: 1-unit spacing across the input energy range
    energy = np.arange(energyInput[0], energyInput[-1], 1)
    # threshold bin index on the new grid
    lowerBinThreshold = np.argmin( np.abs( energy - lowerBinThreshold ))

    # container for the interpolated spectra
    d0 = np.zeros((dInput.shape[0], dInput.shape[1], len(energy)))
    # interpolate one pixel at a time
    for t in range(dInput.shape[0]):  # cycle through slices
        if (t % 100 == 0):
            print('slice %d' %t)
        for ch in range(dInput.shape[1]): # cycle through the channels
            yy = np.interp(energy, energyInput, dInput[t,ch,:])
            # rescale so the interpolated spectrum keeps the original counts
            yy = yy * dInput[t,ch,:].sum() / yy.sum()
            d0[t,ch,:] = yy  # np.interp returns a fresh array; no copy needed

    # copy with the lower bins zeroed (vectorized)
    d = d0.copy()
    d[:,:,:lowerBinThreshold] = 0.0

    temp = {}

    temp['count'] = d0.sum(2).astype(float)
    temp['count2'] = d.sum(2).astype(float)  # excludes the lower bins

    # 3d energy matrix (same size as d); energy runs along the 3rd dimension
    energyMatrix = np.tile(energy, (d.shape[0], d.shape[1], 1))
    temp['binMean'] = (d * energyMatrix).sum(2) / d.sum(2)

    binMeanMatrix = np.tile(temp['binMean'], (d.shape[2],1,1)).transpose((1, 2, 0))
    temp['binSTD'] = np.sqrt( (( (energyMatrix - binMeanMatrix)**2) * d.astype(float) ).sum(2) / d.sum(2).astype(float)  )

    temp['binSTD_binMean'] = temp['binSTD'] / temp['binMean']

    # higher central moments
    temp['binMoment4'] = ( ((energyMatrix - binMeanMatrix)**4) * d.astype(float) ).sum(axis = 2) / d.sum(axis = 2).astype(float)
    temp['binMoment3'] = ( ((energyMatrix - binMeanMatrix)**3) * d.astype(float) ).sum(axis = 2) / d.sum(axis = 2).astype(float)

    # skewness and kurtosis
    # these should be gain invariant, as both numerator and denominator are linear with gain
    temp['binSkew'] = temp['binMoment3'] / temp['binSTD'] **3.0
    temp['binKur'] = temp['binMoment4'] / temp['binSTD'] **4.0

    # bins closest to energies 10 and 20
    highlowIndex10 = np.argmin( abs(energy - 10.0) )
    highlowIndex20 = np.argmin( abs(energy - 20.0) )

    temp['multibin_0_10'] = d[:,:,0:highlowIndex10].sum(axis=2)  # bins below ~10
    temp['multibin_10_256'] = d[:,:,highlowIndex10:].sum(axis=2)
    temp['multibin_0_20'] = d[:,:,0:highlowIndex20].sum(axis=2)
    temp['multibin_20_256'] = d[:,:,highlowIndex20:].sum(axis=2)

    temp['multibin_10_ratio'] = temp['multibin_0_10'].astype(float) / temp['multibin_10_256'].astype(float)
    temp['multibin_20_ratio'] = temp['multibin_0_20'].astype(float) / temp['multibin_20_256'].astype(float)

    # QUANTILES
    datTempCumSum = d.cumsum(axis = 2).astype(float) # cumulative sum
    datTempCumSum = datTempCumSum / np.tile(datTempCumSum[:,:,-1], (d.shape[2], 1, 1)).transpose((1, 2, 0)) # normalize the cumulative sum
    qIntervals = [10., 25., 50., 75., 90.]
    for j in range(len(qIntervals)):
        statName = 'q_%02d' % qIntervals[j]
        temp[statName] = energy[np.argmin(abs(datTempCumSum - (qIntervals[j]/100)), axis = 2)]
    temp['q_range'] = temp['q_75']- temp['q_25']
    temp['q_range_ratio'] = temp['q_range'] / temp['q_50']
    temp['binMean_q50'] = temp['binMean'] - temp['q_50']

    # linear discriminant, 4/28/2014 version
    (xbar_, scalings_, pfitmean, pfitsigma) = GetZspecDiscrimValuesVer2()

    temp['dist0'] = (temp['binSkew'] - xbar_[0]) * scalings_[0] + (temp['binMean'] - xbar_[1]) * scalings_[1]

    # remove the count-dependent offset
    temp['discrim'] = temp['dist0'] - np.polyval(pfitmean, temp['count2'])

    return(temp)
    
    
def CalculateFeaturesZspecSingleSpectrum(d0, energy, lowerBinThreshold = 1):
    """Calculate features for a single zspec spectrum.  Assumed normalized to per x-ray pulse.

        Parameters:
            d0                -- 1-D array of counts, one entry per energy bin
            energy            -- energy value of each bin centre
            lowerBinThreshold -- energy below which bins are zeroed

        Returns a dict of scalar features (counts, moments, split
        ratios, quantiles and the discriminant) for the spectrum.

        5/6/2014, JK, Added additional moments
    """
    temp = {}

    # find lower threshold bin and zero all entries below it
    lowerBinThreshold = np.argmin( np.abs( energy - lowerBinThreshold ))
    d = copy.copy(d0)
    if (lowerBinThreshold > 0):
        # BUG FIX: d is 1-D here; the old code indexed it as a 3-D cube
        # (d[:,:,:threshold]), which raises IndexError whenever the
        # threshold bin is above zero.
        d[:lowerBinThreshold] = 0.0

    temp['count'] = d0.sum().astype(float)
    temp['count2'] = d.sum().astype(float)  # counts above the threshold only

    temp['binMean'] = (d * energy).sum() / d.sum()

    temp['binSTD'] = np.sqrt( (( (energy - temp['binMean'])**2) * d.astype(float) ).sum() / d.sum().astype(float)  )

    temp['binSTD_binMean'] = temp['binSTD'] / temp['binMean']

    # higher central moments
    temp['binMoment4'] = ( ((energy - temp['binMean'])**4) * d.astype(float) ).sum() / d.sum().astype(float)
    temp['binMoment3'] = ( ((energy - temp['binMean'])**3) * d.astype(float) ).sum() / d.sum().astype(float)

    # skewness and kurtosis
    # these should be gain invariant, as both numerator and denominator are linear with gain
    temp['binSkew'] = temp['binMoment3'] / temp['binSTD'] **3.0
    temp['binKur'] = temp['binMoment4'] / temp['binSTD'] **4.0

    # bins closest to energies 10 and 20
    highlowIndex10 = np.argmin( abs(energy - 10) )
    highlowIndex20 = np.argmin( abs(energy - 20) )

    temp['multibin_0_10'] = d[0:highlowIndex10].sum()  # bins below ~10
    temp['multibin_10_256'] = d[highlowIndex10:].sum()
    temp['multibin_0_20'] = d[0:highlowIndex20].sum()
    temp['multibin_20_256'] = d[highlowIndex20:].sum()

    temp['multibin_10_ratio'] = temp['multibin_0_10'].astype(float) / temp['multibin_10_256'].astype(float)
    temp['multibin_20_ratio'] = temp['multibin_0_20'].astype(float) / temp['multibin_20_256'].astype(float)

    # QUANTILES
    datTempCumSum = d.cumsum().astype(float) / d.sum().astype(float) # normalized cumulative sum
    qIntervals = [10., 25., 50., 75., 90.]
    for j in range(len(qIntervals)):
        statName = 'q_%02d' % qIntervals[j]
        temp[statName] = energy[np.argmin(abs(datTempCumSum - (qIntervals[j]/100)))]
    temp['q_range'] = temp['q_75']- temp['q_25']
    temp['q_range_ratio'] = temp['q_range'] / temp['q_50']
    temp['binMean_q50'] = temp['binMean'] - temp['q_50']

    # linear discriminant, 4/28/2014 version
    (xbar_, scalings_, pfitmean, pfitsigma) = GetZspecDiscrimValuesVer2()

    temp['dist0'] = (temp['binSkew'] - xbar_[0]) * scalings_[0] + (temp['binMean'] - xbar_[1]) * scalings_[1]

    # remove the count-dependent offset
    temp['discrim'] = temp['dist0'] - np.polyval(pfitmean, temp['count2'])

    return(temp)

def PlotZspecBasicHighZFinder(fullFilenameCargo, fullFilenameZspec, fullFilenameMarker, \
    plotSaveDir, newWidth, newHeight, xCargoInternalBounds, yCargoInternalBounds, cargoCountRange, windowList, discrimThresholdList, fractionPixelsList, figureSize):
    """
        Reads in standard width cargo image, read in marker fiels, read in standard-width zspec cargo image, 
        calculate features, reduces size of the image and make plots for all combinations of window sizes,
        threshold and feature calculation format.  Saves cargo, zspec and dist scatter plot images.
    """
    (goodZspecMask, badZspecMask, goodZspecNameList, badZspecNameList, \
            goodZspecIndices, badZspecIndices) = ZspecDetectorLists()

    a, filename = os.path.split(fullFilenameZspec)
    zspecScanNumber = filename[0:4]

    energy = np.array([7.50E+00,1.05E+01,1.35E+01,1.65E+01,1.95E+01,2.25E+01,2.55E+01, \
        2.85E+01,3.15E+01,3.45E+01,3.75E+01,4.05E+01,4.35E+01,4.65E+01,4.95E+01,\
        5.25E+01,5.55E+01,5.85E+01,6.15E+01,6.45E+01,6.75E+01,7.05E+01,7.35E+01, \
        7.65E+01,7.95E+01,8.25E+01,8.55E+01,8.85E+01,9.15E+01,9.45E+01,9.75E+01,1.01E+02])

    # read in the image
    datCargoStandardWidth = np.load(fullFilenameCargo) 
 
    # read in the markerfiles
    markerList = PBAR_Cargo.ReadCargoMarker(fullFilenameMarker)
   
    # calculate discrimination stuff
    datZspecStandardWidth = np.load(fullFilenameZspec)
    discrim = CalculateFeaturesZspecBasic(datZspecStandardWidth, energy)   
   
    # Reduce the size of the zspec image by summing adjacent bins
    (datZspecSmall, discrimSmall) = ZspecBasicReduceSize(datZspecStandardWidth, energy, newWidth)
    
    # Modify the marker files
    multiplier = newWidth/float(datZspecStandardWidth.shape[0])
    offset = 0.0
    markerSmallList = ModifyMarkersXPosition(markerList, multiplier, offset)
    
    ##################
    ##  MAKE MASKs  ##
    
    # Make reduced size cargo image
    datCargoSmall = CargoReduceSize(datCargoStandardWidth, newHeight, newWidth)
    datCargoSmallMask = CreateCargoMask(datCargoSmall, cargoCountRange, xCargoInternalBounds, yCargoInternalBounds)
    
    ## Look for high z stuff

    mask = copy.copy(datCargoSmallMask)
    
    # loop over window, discrim threshold and fractionPixels
    for (windowIndex, window) in enumerate(windowList):
        for (discrimThresholdIndex, discrimThreshold) in enumerate(discrimThresholdList):
            for (fractionPixelsIndex, fractionPixels) in enumerate(fractionPixelsList):
                
                #
                print('window: %d, %d; discrim threshold: %d; fractionPixles: %3.2f' %(window[0], window[1], discrimThreshold, fractionPixels))
                
                # look for potential stuff
#                    potential = PBAR_Zspec.BasicScanZspecHighZFinder(discrimSmall, datZspecSmall, mask, energy, window, discrimThreshold, fractionPixels)
#                        
                fullFilenamePotential = \
                    fullFilenameMarker.replace('cargomarker', \
                        '%d_%d_%d_%3.2f.potential' %(window[0], window[1], discrimThreshold, fractionPixels) )
#            
                with open(fullFilenamePotential, 'rb') as fid:
                    potential = cPickle.load(fid)
                    print('Read %s' %fullFilenamePotential)
        
                ###########
                ## PLOTS ##
                ###########

                ## Small Cargo image ##
                
                plotMarkers = True
                plotPotential = True

                plt.figure(figsize = figureSize)
                plt.grid()
                
                
                plt.imshow(datCargoSmall.T, interpolation = 'nearest', aspect='auto', cmap = plt.cm.Greys_r)
                #plt.imshow(discrimSmall['discrim'].T, interpolation = 'nearest', aspect='auto', cmap = cm.Greys_r)
                
                plt.colorbar()
                plt.ylabel('Detector Number')
                plt.xlabel('Time Slice')
                plt.title('%s, window: %d, %d, disrim threshold: %d, fractionPixels %3.2f' \
                    %(fullFilenameZspec, window[0], window[1], discrimThreshold, fractionPixels))
                
                plt.axis((0, 600, 136, 0))

                if plotMarkers:
                    marker = markerSmallList
                    for i, mark in enumerate( marker):
                        x = mark['rec_left'], mark['rec_right'], mark['rec_right'], mark['rec_left'], mark['rec_left']
                        y = mark['rec_bottom'], mark['rec_bottom'], mark['rec_top'], mark['rec_top'], mark['rec_bottom']
                        plt.plot(x, y, 'r')
                        plt.plot(mark['x'], mark['y'], 'xr')
                        if 'left' in mark:
                            plt.plot(mark['left']['x'], mark['left']['y'], 'gx')
                        if 'right' in mark:
                            plt.plot(mark['right']['x'], mark['right']['y'], 'go')
                        plt.text(max(x), min(y), mark['target'], color = 'g', fontsize = 14)
                
                if plotPotential:
                    for (potIndex, pot) in enumerate(potential):
                #        plt.plot(pot['boxCenter'][0], pot['boxCenter'][1], 'xb', markersize = 14)
                        plt.scatter(pot['boxCenter'][0], pot['boxCenter'][1], color = 'b', \
                            s = 50 * pot['window'].mean(), linewidth = pot['window'].mean()/2.0, edgecolor = 'blue', facecolor = 'blue', marker = 'x', alpha = 0.5)
                
#                plt.show()
                # save image to file
                plotFullFilename = os.path.join(plotSaveDir, '%s_%d_%d_%d_%3.2f_Cargo.png' %(zspecScanNumber, window[0], window[1], discrimThreshold, fractionPixels))
                plt.savefig(plotFullFilename)
                print('Wrote: %s' %plotFullFilename)
                plt.close('all')
                
                ## Zspec image, discrim, only mask points shown  ##
                
                climit = (discrimThreshold, 40)
                
                plotMarkers = True
                plotPotential = True

                plt.figure(figsize = figureSize)
                plt.grid()
                
                mask = ( discrimSmall['count'] > 0 ) & (discrimSmall['count'] < 400) & datCargoSmallMask
                
                temp  = copy.copy(discrimSmall['discrim'])
                cutt = ~np.isnan(temp) & ~np.isinf(temp)
                temp[~cutt] = min(temp[cutt])
                
                temp = discrimSmall['discrim'] * mask
                minValue = temp[mask].min()
                temp[~mask] = minValue
                
                plt.imshow(temp.T, interpolation = 'nearest', aspect='auto', cmap = plt.cm.Greys_r)
                #plt.imshow(discrimSmall['discrim'].T, interpolation = 'nearest', aspect='auto', cmap = cm.Greys_r)
                
                plt.colorbar()
                plt.ylabel('Detector Number')
                plt.xlabel('Time Slice')
                plt.title('%s, window: %d, %d, disrim threshold: %d, fractionPixels %3.2f' \
                    %(fullFilenameZspec, window[0], window[1], discrimThreshold, fractionPixels))
                
                plt.axis((0, 600, 136, 0))
                plt.clim(climit)
                if plotMarkers:
                    marker = markerSmallList
                    for i, mark in enumerate( marker):
                        x = mark['rec_left'], mark['rec_right'], mark['rec_right'], mark['rec_left'], mark['rec_left']
                        y = mark['rec_bottom'], mark['rec_bottom'], mark['rec_top'], mark['rec_top'], mark['rec_bottom']
                        plt.plot(x, y, 'r')
                        plt.plot(mark['x'], mark['y'], 'xr')
                        if 'left' in mark:
                            plt.plot(mark['left']['x'], mark['left']['y'], 'gx')
                        if 'right' in mark:
                            plt.plot(mark['right']['x'], mark['right']['y'], 'go')
                        plt.text(max(x), min(y), mark['target'], color = 'g', fontsize = 14)
                
                if plotPotential:
                    for (potIndex, pot) in enumerate(potential):
                #        plt.plot(pot['boxCenter'][0], pot['boxCenter'][1], 'xb', markersize = 14)
                        plt.scatter(pot['boxCenter'][0], pot['boxCenter'][1], color = 'b', \
                            s = 50 * pot['window'].mean(), linewidth = pot['window'].mean()/2.0, edgecolor = 'blue', facecolor = 'blue', marker = 'x', alpha = 0.5)
#                plt.show()
                # save image to file
                plotFullFilename = os.path.join(plotSaveDir, '%s_%d_%d_%d_%3.2f_ZspecBasic.png' %(zspecScanNumber, window[0], window[1], discrimThreshold, fractionPixels))
                plt.savefig(plotFullFilename)
                print('Wrote: %s' %plotFullFilename)
                plt.close('all')
                
                
                ##  Zspec small, scatter plot  ##
                removeOffset = True
                
                plt.figure(figsize = figureSize)
                plt.grid()
                
                feature1 = 'count'
                feature2 = 'discrim'
                
                mask = (discrimSmall['count'] > 0 ) & (discrimSmall['count'] < 400) & datCargoSmallMask
                
                mask = mask[:,goodZspecIndices]
                
                x1 = discrimSmall[feature1][:,goodZspecIndices][mask].flatten()
                y1 = discrimSmall[feature2][:,goodZspecIndices][mask].flatten()
                
                x1 = discrimSmall[feature1][:,goodZspecIndices].flatten()
                y1 = discrimSmall[feature2][:,goodZspecIndices].flatten()
                
                plt.plot(x1, y1, '.k', alpha = 0.15, markersize = 10, label = 'In Mask')
                
                marker = markerSmallList
                for i, mark in enumerate(marker):
                    x_range = np.array((mark['rec_left'], mark['rec_right'])) # left < right
                    y_range = np.array((mark['rec_top'], mark['rec_bottom'])) # top  < bottom
                    xarray = np.arange(discrimSmall[feature1].shape[0])
                    yarray = np.arange(discrimSmall[feature1].shape[1])
                    xcut = (xarray > x_range[0]) & (xarray < x_range[1])
                    ycut = (yarray > y_range[0]) & (yarray < y_range[1])
                    x = discrimSmall[feature1][:,goodZspecMask & ycut][xcut,:].flatten()
                    y = discrimSmall[feature2][:,goodZspecMask & ycut][xcut,:].flatten()
                
                    try:
                        # high density stuff
                        if mark['target'][0] == 'S':
                            plt.plot(x, y, 'db', markersize = 8, alpha  = 0.3, label = mark['target'])
                        elif mark['target'][0] == 'W':
                            plt.plot(x, y, 'vb', markersize = 8, alpha  = 0.3, label = mark['target'])
                        elif mark['target'][0] == 'D':
                            plt.plot(x, y, 'sb', markersize = 8, alpha  = 0.3, label = mark['target'])
                        elif mark['target'][0] == 'P':
                            plt.plot(x, y, '*b', markersize = 8, alpha  = 0.3, label = mark['target'])
                        elif mark['target'][0] == 'F':# low density stuff
                            plt.plot(x, y, 'or', markersize = 8, alpha  = 0.3, label = mark['target'])
                        else:
                            plt.plot(x, y, 'om', markersize = 8, alpha  = 0.3, label = mark['target'])
                    except:
                        plt.plot(x, y, 'om', markersize = 8, alpha  = 0.3, label = mark['target'])
                
                if plotPotential:
                    for (potIndex, pot) in enumerate(potential):
                        plt.scatter(pot['discrim']['count'], pot['discrim']['discrim'], color = 'b', \
                        s = 150 * pot['window'].mean(), linewidth = pot['window'].mean() *2.0, edgecolor = 'green', facecolor = 'blue', marker = 'x', alpha = 0.5)

                plt.legend()
                plt.title('%s, window: %d, %d, disrim threshold: %d, fractionPixels %3.2f' \
                    %(fullFilenameZspec, window[0], window[1], discrimThreshold, fractionPixels))
                plt.axis((0, 400, -50, 50))
                
#                plt.show()
            
                # save image to file
                plotFullFilename = os.path.join(plotSaveDir, '%s_%d_%d_%d_%3.2f_ZspecBasicScatter.png' %(zspecScanNumber, window[0], window[1], discrimThreshold, fractionPixels))
                plt.savefig(plotFullFilename)
                print('Wrote: %s' %plotFullFilename)
                plt.close('all')
                gc.collect()
    gc.collect()
    
    
def _PlotMarkerBoxes(markerSmallList):
    """
        Draw each marker's rectangle, its center point, the optional
        left/right reference points and the target label on the current axes.
    """
    for mark in markerSmallList:
        x = mark['rec_left'], mark['rec_right'], mark['rec_right'], mark['rec_left'], mark['rec_left']
        y = mark['rec_bottom'], mark['rec_bottom'], mark['rec_top'], mark['rec_top'], mark['rec_bottom']
        plt.plot(x, y, 'r')
        plt.plot(mark['x'], mark['y'], 'xr')
        if 'left' in mark:
            plt.plot(mark['left']['x'], mark['left']['y'], 'gx')
        if 'right' in mark:
            plt.plot(mark['right']['x'], mark['right']['y'], 'go')
        plt.text(max(x), min(y), mark['target'], color = 'g', fontsize = 14)

def _PlotPotentialCenters(potential, colorName):
    """
        Mark the box center of each potential detection with an 'x' whose
        size scales with the detection window size.
    """
    for pot in potential:
        plt.scatter(pot['boxCenter'][0], pot['boxCenter'][1], color = colorName, \
            s = 50 * pot['window'].mean(), linewidth = pot['window'].mean()/2.0, \
            edgecolor = colorName, facecolor = colorName, marker = 'x', alpha = 0.5)

def PlotZspecBasicHighZFinderVer2(fullFilenameCargo, fullFilenameZspec, fullFilenameMarker, \
    plotSaveDir, newWidth, newHeight, xCargoInternalBounds, yCargoInternalBounds, cargoCountRange, ZspecCountRange, windowList, discrimThresholdList, fractionPixelsList, figureSize):
    """
        Reads in standard width cargo image, reads in marker files, reads in
        standard-width zspec cargo image, calculates features, reduces size of
        the image and makes plots for all combinations of window sizes,
        threshold and fraction-of-pixels settings.

        For each (window, discrimThreshold, fractionPixels) combination:
          1) runs BasicScanZspecHighZFinderVer2 and pickles the resulting
             'potential' list next to the marker file,
          2) saves diagnostic PNGs to plotSaveDir: cargo image, good-pixel
             mask, zspec discriminant image, discrim>0 mask, and one scatter
             plot per feature in ['discrim', 'dist0', 'binMean', 'binSkew'].

        Review notes: removed two stray Python-2 debug prints and a dead
        min/max computation that was immediately overwritten by constants;
        the repeated marker/potential drawing code is factored into
        _PlotMarkerBoxes / _PlotPotentialCenters.
    """
    MINBIN = 1
    (goodZspecMask, badZspecMask, goodZspecNameList, badZspecNameList, \
            goodZspecIndices, badZspecIndices) = ZspecDetectorLists()

    # scan number is the first four characters of the zspec file name
    a, filename = os.path.split(fullFilenameZspec)
    zspecScanNumber = filename[0:4]

    # Load zspec, reduce size and calculate discriminants
    # the discriminant we care about, discrim, already has the offset removed.
    energy, datZspecStandardWidth = ReadZspecBasicScanPickle(fullFilenameZspec)
    # Reduce the size of the zspec image by summing adjacent bins
    datZspecSmall = ZspecBasicReduceSize(datZspecStandardWidth, energy, newWidth)
    discrimSmall = CalculateFeaturesZspecBasic(datZspecSmall, energy, MINBIN)

    # read in the cargo image
    print('Loading %s' %fullFilenameCargo)
    with open(fullFilenameCargo, 'rb') as fid:
        datCargoStandardWidth = cPickle.load(fid)

    # read in the marker files
    print('Loading %s' %fullFilenameMarker)
    markerList = PBAR_Cargo.ReadCargoMarker(fullFilenameMarker)

    # Rescale the marker x positions to the reduced image width
    multiplier = newWidth/float(datZspecStandardWidth.shape[0])
    offset = 0.0
    markerSmallList = ModifyMarkersXPosition(markerList, multiplier, offset)

    ##################
    ##  MAKE MASKs  ##

    # Make reduced size cargo image and its good-pixel mask
    datCargoSmall = CargoReduceSize(datCargoStandardWidth, newHeight, newWidth)
    datCargoSmallMask = CreateCargoMask(datCargoSmall, cargoCountRange, xCargoInternalBounds, yCargoInternalBounds)
    # currently unused; kept so the count-based mask can be toggled back on
    zspecCountMask = (discrimSmall['count'] > ZspecCountRange[0]) & (discrimSmall['count'] < ZspecCountRange[1])
#    maskGoodPixels = datCargoSmallMask & zspecCountMask
    maskGoodPixels = copy.copy(datCargoSmallMask)

    # loop over window, discrim threshold and fractionPixels
    for (windowIndex, window) in enumerate(windowList):
        for (discrimThresholdIndex, discrimThreshold) in enumerate(discrimThresholdList):
            for (fractionPixelsIndex, fractionPixels) in enumerate(fractionPixelsList):

                print('window: %d, %d; discrim threshold: %d; fractionPixles: %3.2f' %(window[0], window[1], discrimThreshold, fractionPixels))

                # look for potential high-z regions
                potential = BasicScanZspecHighZFinderVer2(discrimSmall, datZspecSmall, maskGoodPixels, energy, window, discrimThreshold, ZspecCountRange, fractionPixels)

                # pickle the potential list next to the marker file
                fullFilenamePotential = \
                    fullFilenameMarker.replace('cargomarkerSW', \
                        '%d_%d_%d_%3.2f.potential' %(window[0], window[1], discrimThreshold, fractionPixels) )

                with open(fullFilenamePotential, 'wb') as fid:
                    cPickle.dump(potential, fid, 2)
                    print('Wrote %s' %fullFilenamePotential)

                ###########
                ## PLOTS ##
                ###########

                ## Small Cargo image ##

                plt.figure(figsize = figureSize)
                plt.grid()
                plt.imshow(datCargoSmall.T, interpolation = 'nearest', aspect='auto', cmap = plt.cm.Greys_r)
                plt.colorbar()
                plt.ylabel('Detector Number')
                plt.xlabel('Time Slice')
                plt.title('%s, Rad, window: %d, %d, disrim threshold: %d, fractionPixels %3.2f' \
                    %(fullFilenameZspec, window[0], window[1], discrimThreshold, fractionPixels))
                plt.axis((0, 600, 136, 0))

                _PlotMarkerBoxes(markerSmallList)
                _PlotPotentialCenters(potential, 'magenta')

                # save image to file
                plotFullFilename = os.path.join(plotSaveDir, '%s_%d_%d_%d_%3.2f_Cargo.png' %(zspecScanNumber, window[0], window[1], discrimThreshold, fractionPixels))
                plt.savefig(plotFullFilename)
                print('Wrote: %s' %plotFullFilename)
                plt.close('all')

                ## mask ##

                plt.figure(figsize = figureSize)
                plt.grid()
                plt.imshow(maskGoodPixels.T, interpolation = 'nearest', aspect='auto', cmap = plt.cm.Greys_r)
                plt.colorbar()
                plt.ylabel('Detector Number')
                plt.xlabel('Time Slice')
                plt.title('%s, mask, window: %d, %d, disrim threshold: %d, fractionPixels %3.2f, #Found %d' \
                    %(fullFilenameZspec, window[0], window[1], discrimThreshold, fractionPixels, len(potential)))
                plt.axis((0, 600, 136, 0))

                _PlotMarkerBoxes(markerSmallList)
                _PlotPotentialCenters(potential, 'magenta')

                # save image to file
                plotFullFilename = os.path.join(plotSaveDir, '%s_%d_%d_%d_%3.2f_Mask.png' %(zspecScanNumber, window[0], window[1], discrimThreshold, fractionPixels))
                plt.savefig(plotFullFilename)
                print('Wrote: %s' %plotFullFilename)
                plt.close('all')

                ## Zspec image, discrim, only mask points shown  ##

                plt.figure(figsize = figureSize)
                plt.grid()

                # Clamp the discriminant to a fixed display range and blank
                # out NaN / unmasked pixels so they render black.
                temp = copy.copy(discrimSmall['discrim'])
                minValue = -40
                maxValue = 40
                temp[~maskGoodPixels] = minValue
                temp[np.isnan(temp)] = minValue
                temp[temp > maxValue] = maxValue
                temp[temp < minValue] = minValue

                plt.imshow(temp.T, interpolation = 'nearest', aspect='auto', cmap = plt.cm.Greys_r)
                plt.colorbar()
                plt.ylabel('Detector Number')
                plt.xlabel('Time Slice')
                plt.title('%s, Zspec, discrim, window: %d, %d, disrim threshold: %d, fractionPixels %3.2f, #Found %d' \
                    %(fullFilenameZspec, window[0], window[1], discrimThreshold, fractionPixels, len(potential)))
                plt.axis((0, 600, 136, 0))

                _PlotMarkerBoxes(markerSmallList)
                _PlotPotentialCenters(potential, 'magenta')

                # save image to file
                plotFullFilename = os.path.join(plotSaveDir, '%s_%d_%d_%d_%3.2f_ZspecBasic.png' %(zspecScanNumber, window[0], window[1], discrimThreshold, fractionPixels))
                plt.savefig(plotFullFilename)
                print('Wrote: %s' %plotFullFilename)
                plt.close('all')

                ## Mask of rad and zspec discriminant > 0, added 4/20/2014 ##

                plt.figure(figsize = figureSize)
                plt.grid()

                temp = copy.copy(discrimSmall['discrim'])
                # in rad count range, is not nan and discrim > 0
                mask = maskGoodPixels & ~np.isnan(temp) & (temp > 0)
                plt.imshow(mask.T, interpolation = 'nearest', aspect='auto', cmap = plt.cm.Greys_r)
                plt.colorbar()
                plt.ylabel('Detector Number')
                plt.xlabel('Time Slice')
                plt.title('%s, Good Rad, Zspec discrim > 0, window: %d, %d, disrim threshold: %d, fractionPixels %3.2f, #Found %d' \
                    %(fullFilenameZspec, window[0], window[1], discrimThreshold, fractionPixels, len(potential)))
                plt.axis((0, 600, 136, 0))

                _PlotMarkerBoxes(markerSmallList)
                _PlotPotentialCenters(potential, 'magenta')

                # save image to file
                plotFullFilename = os.path.join(plotSaveDir, '%s_%d_%d_%d_%3.2f_ZspecBasicAboveZeroRadMask.png' %(zspecScanNumber, window[0], window[1], discrimThreshold, fractionPixels))
                plt.savefig(plotFullFilename)
                print('Wrote: %s' %plotFullFilename)
                plt.close('all')

                ##  Zspec small, SCATTER plot  ##

                feature1 = 'count'
                featureList = ['discrim', 'dist0', 'binMean', 'binSkew']
                for feature2 in featureList:
                    plt.figure(figsize = figureSize)
                    plt.grid()

                    mask = maskGoodPixels[:,goodZspecIndices]

                    x1_mask = discrimSmall[feature1][:,goodZspecIndices][mask].flatten()
                    y1_mask = discrimSmall[feature2][:,goodZspecIndices][mask].flatten()

                    plt.plot(x1_mask, y1_mask, '.k', alpha = 0.15, markersize = 10, label = 'In Mask')

                    for mark in markerSmallList:
                        x_range = np.array((mark['rec_left'], mark['rec_right'])) # left < right
                        y_range = np.array((mark['rec_top'], mark['rec_bottom'])) # top  < bottom
                        xarray = np.arange(discrimSmall[feature1].shape[0])
                        yarray = np.arange(discrimSmall[feature1].shape[1])
                        xcut = (xarray > x_range[0]) & (xarray < x_range[1])
                        ycut = (yarray > y_range[0]) & (yarray < y_range[1])
                        x = discrimSmall[feature1][:,goodZspecMask & ycut][xcut,:].flatten()
                        y = discrimSmall[feature2][:,goodZspecMask & ycut][xcut,:].flatten()

                        # marker symbol keyed on the first letter of the target
                        # name; anything unrecognized falls back to magenta
                        try:
                            # high density stuff
                            if mark['target'][0] == 'S':
                                plt.plot(x, y, 'db', markersize = 8, alpha  = 0.3, label = mark['target'])
                            elif mark['target'][0] == 'W':
                                plt.plot(x, y, 'vb', markersize = 8, alpha  = 0.3, label = mark['target'])
                            elif mark['target'][0] == 'D':
                                plt.plot(x, y, 'sb', markersize = 8, alpha  = 0.3, label = mark['target'])
                            elif mark['target'][0] == 'P':
                                plt.plot(x, y, '*b', markersize = 8, alpha  = 0.3, label = mark['target'])
                            elif mark['target'][0] == 'F':# low density stuff
                                plt.plot(x, y, 'or', markersize = 8, alpha  = 0.3, label = mark['target'])
                            else:
                                plt.plot(x, y, 'om', markersize = 8, alpha  = 0.3, label = mark['target'])
                        except (KeyError, IndexError, TypeError):
                            plt.plot(x, y, 'om', markersize = 8, alpha  = 0.3, label = mark['target'])

                    # potentials get larger markers in the scatter plot
                    for pot in potential:
                        plt.scatter(pot['discrim']['count'], pot['discrim'][feature2], color = 'b', \
                        s = 150 * pot['window'].mean(), linewidth = pot['window'].mean() *2.0, edgecolor = 'b', facecolor = 'b', marker = 'x', alpha = 0.5)

                    plt.legend()
                    plt.xlabel(feature1, fontsize = 16)
                    plt.ylabel(feature2, fontsize = 16)

                    plt.title('%s, window: %d, %d, disrim threshold: %d, fractionPixels %3.2f' \
                        %(fullFilenameZspec, window[0], window[1], discrimThreshold, fractionPixels))
                    if feature2 == 'discrim':
                        plt.axis((0, 400, -50, 50))
                    else:
                        plt.xlim((0, 400))

                    # save image to file
                    plotFullFilename = os.path.join(plotSaveDir, '%s_%d_%d_%d_%3.2f_%s_ZspecBasicScatter.png' %(zspecScanNumber, window[0], window[1], discrimThreshold, fractionPixels, feature2))
                    plt.savefig(plotFullFilename)
                    print('Wrote: %s' %plotFullFilename)
                    plt.close('all')

                gc.collect()
    gc.collect()
    
def ReadRad(basepath):
    """
        Read radiography data.

        Parameters
        ----------
        basepath : directory containing 'arrayResponse.txt' (tab-separated map
            file, one header line; column 0 = dataset name, column 3 = rad
            file stem) and an 'Array-Response' subdirectory of .csv files.

        Returns
        -------
        datRad : dict, dataset name -> float array of per-channel responses
        datRadZspec : dict, dataset name -> length-136 array; each zspec
            detector is the mean of its 4 consecutive rad channels
        radMap : str array, the raw contents of arrayResponse.txt

        Note: uses os.path.join instead of the previous hard-coded '\\\\'
        separators, so this now works on non-Windows platforms too.
    """
    # read in the radiography map file
    radMap = np.genfromtxt(os.path.join(basepath, 'arrayResponse.txt'),
                           delimiter = '\t', skip_header = 1, dtype = str)

    # load all the radiography data found in radMap and put into a dictionary;
    # rows with an empty rad-file column are skipped
    datRad = dict()
    for row in radMap:
        dsName = row[0]
        radName = row[3]
        if radName != '':
            fullFileName = os.path.join(basepath, 'Array-Response', radName + '.csv')
            temp = np.genfromtxt(fullFileName, delimiter = '\t', skip_header = 1, dtype = str)
            datRad[dsName] = temp.astype(float)

    # create rad array in the size of zspec: detector i averages rad
    # channels 4*i .. 4*i+3
    datRadZspec = dict()
    for dsName in datRad:
        reduced = np.zeros(136)
        for j in range(136):
            reduced[j] = datRad[dsName][4 * j: 4 * j + 4].mean()
        datRadZspec[dsName] = reduced
    return datRad, datRadZspec, radMap

# make masks

##################
##  MAKE MASKs  ##

def CreateCargoMask(datCargoSmall, cargoCountRange, xCargoInternalBounds, yCargoInternalBounds):
    """
        Create a boolean good-pixel mask for a reduced-size cargo image.

        A pixel is kept when its count is inside cargoCountRange (inclusive),
        it lies inside the x/y internal bounds, and it is not an isolated
        pixel (i.e. it has at least one 4-connected True neighbour).

        Parameters
        ----------
        datCargoSmall : 2-d count image
        cargoCountRange : (low, high) inclusive count limits
        xCargoInternalBounds, yCargoInternalBounds : (first, last) index
            bounds; pixels outside are masked off (removes cargo edges)

        Returns
        -------
        2-d bool array, same shape as datCargoSmall.

        BUG FIX: the original passed an int-cast array to
        skimage.morphology.remove_small_objects, which treats it as a label
        image -- all foreground pixels become a single object, so isolated
        pixels were never actually removed.  The equivalent of
        remove_small_objects(boolMask, 2) (drop connected components of size
        1 under 4-connectivity) is done here with plain numpy.
    """
    # 1) count-range mask
    datCargoSmallMask = ( datCargoSmall >= cargoCountRange[0] ) & \
        ( datCargoSmall <= cargoCountRange[1] )

    # 2) remove cargo edges
    datCargoSmallMask[0:xCargoInternalBounds[0],:] = False
    datCargoSmallMask[xCargoInternalBounds[1]:,:] = False
    datCargoSmallMask[:,0:yCargoInternalBounds[0]] = False
    datCargoSmallMask[:,yCargoInternalBounds[1]:]  = False

    # 3) remove isolated pixels: a size-1 component under 4-connectivity is
    # exactly a True pixel with no True 4-neighbour
    padded = np.pad(datCargoSmallMask, 1, mode = 'constant', constant_values = False)
    hasNeighbor = (padded[:-2, 1:-1] | padded[2:, 1:-1] |
                   padded[1:-1, :-2] | padded[1:-1, 2:])

    return(datCargoSmallMask & hasNeighbor)

# 
def BasicScanZspecHighZFinder(discrimSmall, datZspecSmall, mask, energy, windowListInput, discrimThreshold, fractionPixels):
    """
        Looks for high z material by sliding rectangular windows over the
        reduced zspec image and flagging windows whose combined spectrum has
        a discriminant above discrimThreshold (with a sane count range).

        Parameters
        ----------
        discrimSmall : dict of per-pixel feature images ('discrim', 'count', ...)
        datZspecSmall : 3-d array (x, detector, energy bin) of spectra
        mask : 2-d bool array of pixels allowed to contribute
        energy : energy axis, passed to the single-spectrum feature calculation
        windowListInput : a single (wx, wy) window or a list of windows
        discrimThreshold : minimum window discriminant to flag a potential
        fractionPixels : minimum fraction of window pixels that must be masked-in

        Returns
        -------
        list of dicts, one per flagged window, with keys 'discrim',
        'boxCenter', 'xrange', 'yrange', 'window',
        'datZspecSmallInMaskInWindow' and 'subMask'.

        10/28/2013
        Review notes: removed a stray Python-2 debug print that ran in the
        innermost loop, and an unused GetZspecDiscrimValues() call whose
        results were only referenced by commented-out code.
    """
    # Accept either a single (wx, wy) window or a list of windows.
    try:
        windowListInput[0][0]+1 # succeeds only for a list of windows
        windowList = windowListInput
    except (TypeError, IndexError):
        windowList = [windowListInput]  # turn it into a list

    # make list for storing potentials
    potential = []
    for (windowIndex, window) in enumerate(windowList):
        for i in np.arange(discrimSmall['discrim'].shape[0] - window[0]): # cycle through x

            xrange = np.array( (i, i+ window[0]) ) # range in x (inclusive)
            # skip slice if no masked-in pixels at all
            if mask[xrange[0]:(xrange[1]),:].sum() == 0:
                continue

            if (i % 100 == 0):
                print('Column %d' %i)
            for j in np.arange(discrimSmall['discrim'].shape[1] - window[1]): # cycle through y (detector)
                boxCenter = ( (i+round(window[0]/2.0)), (j+round(window[1]/2.0)) ) # box center

                yrange = np.array( (j, j+window[1]) ) # range in y (inclusive), detector dimension
                # subset of the mask, has dimensions window[0], window[1]
                subMask = mask[xrange[0]:(xrange[1]), yrange[0]:(yrange[1])]

                # number of pixels in the submask (window[0] * window[1])
                numberPixelsWindow = subMask.size

                # need to have at least these many pixels in window with mask pixels = true
                numberPixelsWindowMaskThreshold = round(numberPixelsWindow*fractionPixels)

                # Check if the number of pixels in window is enough
                if subMask.sum() >= numberPixelsWindowMaskThreshold:
                    # calculate the total spectra from the good pixels in the window
                    datZspecSmallInSubMask = datZspecSmall[xrange[0]:(xrange[1]), yrange[0]:(yrange[1]),:]

                    # sum the spectra of the good pixels and normalize per pixel
                    datZspecSmallInMaskInWindow = datZspecSmallInSubMask[subMask,:].sum(0) / subMask.flatten().sum()

                    discrimSmallInMaskInWindow = CalculateFeaturesZspecSingleSpectrum(datZspecSmallInMaskInWindow, energy)

                    # if greater than threshold (and in a sane count range) create potential object
                    if discrimSmallInMaskInWindow['discrim'] > discrimThreshold:
                        if (discrimSmallInMaskInWindow['count'] > 0 and discrimSmallInMaskInWindow['count'] < 400):
                            print('Potential (%d, %d), %3.2f' %(boxCenter[0], boxCenter[1], discrimSmallInMaskInWindow['discrim']) )
                            potentialTemp = {}
                            potentialTemp['discrim'] = discrimSmallInMaskInWindow
                            potentialTemp['boxCenter'] = boxCenter
                            potentialTemp['xrange'] = xrange
                            potentialTemp['yrange'] = yrange
                            potentialTemp['window'] = window
                            potentialTemp['datZspecSmallInMaskInWindow'] = datZspecSmallInMaskInWindow
                            potentialTemp['subMask'] = subMask
                            potential.append(potentialTemp)
    return(potential)


def BasicScanZspecHighZFinderVer2(discrimSmall, datZspecSmall, mask, energy, windowListInput, discrimThreshold, countRange, fractionPixels):
    """
        Looks for high z material by sliding rectangular windows over the
        discriminant map and re-evaluating the discriminant on the mean
        spectrum of every window that contains enough masked-in pixels.

        Parameters
        ----------
        discrimSmall : dict of 2-d feature maps; 'discrim' sets the scan extent
        datZspecSmall : 3-d array (x, y, energy) of spectra
        mask : 2-d boolean array; True marks pixels to include
        energy : energy axis passed through to the feature calculation
        windowListInput : a single (height, width) window or a list of them
        discrimThreshold : minimum pooled discriminant needed to flag a window
        countRange : (low, high) accepted range for the pooled 'count' feature
        fractionPixels : minimum fraction of window pixels that must be masked in

        Returns
        -------
        list of dicts, one per flagged ("potential") window

        10/28/2013
        4/8/2014, JK
    """
    MINBIN = 1
    # Accept either a single window or a list of windows.  A single window
    # is not doubly indexable, so the probe below raises TypeError (int not
    # subscriptable) or IndexError (1-d array) -- catch only those, not
    # everything.
    try:
        windowListInput[0][0] + 1  # succeeds only for a list of windows
        windowList = windowListInput
    except (TypeError, IndexError):
        windowList = [windowListInput]  # turn it into a list

    # make list for storing potentials
    potential = []
    for (windowIndex, window) in enumerate(windowList):
        for i in np.arange(discrimSmall['discrim'].shape[0] - window[0]):  # cycle through x

            # range in x (inclusive); renamed from 'xrange' so it no longer
            # shadows the builtin
            xRange = np.array((i, i + window[0]))
            # skip this slice if the mask is empty there
            if mask[xRange[0]:(xRange[1]), :].sum() == 0:
                continue

            if (i % 100 == 0):
                print('Column %d' % i)
            for j in np.arange(discrimSmall['discrim'].shape[1] - window[1]):  # cycle through y
                boxCenter = ((i + round(window[0]/2.0)), (j + round(window[1]/2.0)))  # box center

                yRange = np.array((j, j + window[1]))  # range in y (inclusive), detector dimension
                # subset of the mask, has dimensions window[0], window[1]
                subMask = mask[xRange[0]:(xRange[1]), yRange[0]:(yRange[1])]

                # number of pixels in the submask (window[0] * window[1])
                numberPixelsWindow = subMask.size

                # need to have at least this many masked-in pixels in the window
                numberPixelsWindowMaskThreshold = round(numberPixelsWindow * fractionPixels)

                # Check if the number of pixels in window is enough
                if subMask.sum() >= numberPixelsWindowMaskThreshold:
                    # spectra of all pixels inside the window
                    datZspecSmallInSubMask = datZspecSmall[xRange[0]:(xRange[1]), yRange[0]:(yRange[1]), :]

                    # NOTE(review): leftover debug output in the hot inner
                    # loop -- consider removing once the scan is trusted
                    print('subMask')
                    print(subMask)
                    print(numberPixelsWindowMaskThreshold)
                    print(datZspecSmallInSubMask[subMask, :].shape)
                    print(datZspecSmallInSubMask[subMask, :].mean(0))

                    # mean spectrum of the masked-in pixels (normalized per pixel)
                    datZspecSmallInMaskInWindow = datZspecSmallInSubMask[subMask, :].mean(0)

                    discrimSmallInMaskInWindow = CalculateFeaturesZspecSingleSpectrum(datZspecSmallInMaskInWindow, energy, MINBIN)

                    # if greater than threshold then create potential object
                    if discrimSmallInMaskInWindow['discrim'] > discrimThreshold:
                        # has to be in correct zspec count range
                        if (discrimSmallInMaskInWindow['count'] > countRange[0] and discrimSmallInMaskInWindow['count'] < countRange[1]):
                            print('Potential (%d, %d), %3.2f' % (boxCenter[0], boxCenter[1], discrimSmallInMaskInWindow['discrim']))
                            potentialTemp = {}
                            potentialTemp['discrim'] = discrimSmallInMaskInWindow
                            potentialTemp['boxCenter'] = boxCenter
                            potentialTemp['xrange'] = xRange
                            potentialTemp['yrange'] = yRange
                            potentialTemp['window'] = window
                            potentialTemp['datZspecSmallInMaskInWindow'] = datZspecSmallInMaskInWindow
                            potentialTemp['subMask'] = subMask
                            potential.append(potentialTemp)
    return(potential)
    

def ModifyMarkersXPosition(markerList, multiplier, offset):
    """
        Return a deep copy of markerList with every x-related field rescaled
        as value * multiplier + offset.

        Parameters
        ----------
        markerList : list of marker dicts with keys 'rec_left', 'rec_right',
            'x', and optionally nested 'left'/'right' dicts carrying an 'x'
        multiplier, offset : linear transform applied to each x value

        Returns
        -------
        New list of adjusted marker dicts; the input list is not modified.
    """
    # deep copy so the caller's markers are left untouched
    adjusted = copy.deepcopy(markerList)
    # iterate the markers directly instead of indexing by np.arange(len(...))
    for marker in adjusted:
        marker['rec_left'] = marker['rec_left'] * multiplier + offset
        marker['rec_right'] = marker['rec_right'] * multiplier + offset
        marker['x'] = marker['x'] * multiplier + offset
        # optional nested sub-markers also carry an x position
        if 'left' in marker:
            marker['left']['x'] = marker['left']['x'] * multiplier + offset
        if 'right' in marker:
            marker['right']['x'] = marker['right']['x'] * multiplier + offset
    return(adjusted)
    
    

# DATA CORRECTION
def CreateGainCorrectionFile(basepath, filename, rateRange, binBounds, rateLook, binBaseline):
    """ Makes a file containing the epoch time and the gain calibration number.
        1st zspec dataset.

        For every calibration ('CC') dataset and every detector: fit an
        exponential to the spectrum (restricted to binBounds and rateRange)
        and record the bin at which the fitted rate equals rateLook.  Rows of
        [epoch time, value per detector] are written as comma-delimited csv.

        Parameters
        ----------
        basepath : directory holding the zspec datasets (Windows-style '\\\\' join)
        filename : output csv filename, written inside basepath
        rateRange : (low, high) count-rate window selecting bins for the fit
        binBounds : (low, high) bin window selecting bins for the fit
        rateLook : rate at which the fitted bin position is evaluated
        binBaseline : reference bin; binMean/binBaseline gives the gain shift
            (computed but currently not the quantity written to file)
    """
    # Temporary quirks
    #   - baseline dataset is hard coded
    #   - loads extraneous datasets
    # 9/24/2013, changed delimiter to ','
    
    ## READS IN ALL THE FILES AND DEFINES DATASET GROUPS - need to trim down
    # Create list of datasets
    (filenameList, fullfilenameList) = \
                   GenerateDefaultDatasetFilenameList(basepath)    
    # LISTS OF DATASETS GROUPS
    (datasetGroups, datasetGroupsIndices) = \
                    GenerateDefaultDatasetGroupList(filenameList)
    # list of good/bad detectors
    (goodDetectorsList, badDetectorsList) = \
                        GenerateDefaultDetectorList()
    # Load zspec data
    dat = ReadZspec(fullfilenameList)
    # Load summary data
    infoFilename = basepath + '\\' + 'datasetSummaryOLD.txt'
    (datasetDescription, datasetAcquisitionTime, \
        datasetTime, datasetTimeNum, datasetTimeStr) = \
        GetDatasetInformation(infoFilename, filenameList)    

    # Get the indices for the calibration datasets
    CCindices = datasetGroupsIndices['CC'];
    
    # measure for calculating the gain
    # DIM: number of datasets by number of detectors
    binMeanCC = np.zeros((len(CCindices), dat[0].shape[1]))
    
    # gain shift across datasets
    gainShift = np.zeros((len(CCindices), dat[0].shape[1]))
    
    binArray = np.arange(dat[0].shape[0])
    
    for (cc, datasetIndex) in enumerate(CCindices):# cycle through the datasets
        for ch in xrange(dat[datasetIndex].shape[1]):  # cycle through the detector numbers
            # include only the bins between the binBounds and count rate between the rateRange
            cutt = (binArray > binBounds[0]) & (binArray < binBounds[1])
            cutt = cutt & (dat[datasetIndex][:,ch]/datasetAcquisitionTime[datasetIndex] > rateRange[0]) & \
                (dat[datasetIndex][:,ch]/datasetAcquisitionTime[datasetIndex] < rateRange[1])
            # if there are any bins in this range, then calculate the binMean
            if any(cutt):
                # fit a line to log(rate) vs bin, i.e. an exponential fit
                temp = np.polyfit(binArray[cutt], np.log(dat[datasetIndex][cutt,ch]/datasetAcquisitionTime[datasetIndex]),1)
                pfit0 = [temp[0], np.exp(temp[1])]
                # bin at which the fitted exponential equals rateLook
                binMean = (1.0/pfit0[0]) * np.log(rateLook/pfit0[1])
            else:
                binMean = 0.0
            binMeanCC[cc, ch] = binMean
            # Print summary
        print(filenameList[datasetIndex], binMeanCC[cc,:])
    gainShift = binMeanCC / binBaseline

    # Build the array that will be written to file, containing time string,
    # epoch time and gain correction values
    temp = np.empty((len(CCindices), 1 + dat[0].shape[1]), dtype = '|S20')
    temp[:,0] = datasetTimeNum[CCindices]
    temp[:,1:] = binMeanCC
    #temp[:,1:] = gainShift
    
    fullfilename = basepath + '\\' + filename
    # context manager guarantees the file is closed even if writerows fails
    with open(fullfilename, 'wb') as fid:
        csvWriterObj = csv.writer(fid, delimiter = ',')
        csvWriterObj.writerows(temp)

def CreateGainCorrectionFile2ndSet(basepath, filename, rateRange, binBounds, rateLook, binBaseline):
    """ Makes a file containing the epoch time and the fit bin value.  (I had been saying that this was
        the gain calibration value but it's not because that would suggests that it's a number I would use to 
        multiple the bin values by to get calibrated bins.)
        2nd zspec dataset.    
        4/3/2015

        Parameters
        ----------
        basepath : directory holding the zspec datasets
        filename : output csv filename, written inside basepath
        rateRange : (low, high) count-rate window selecting bins for the fit
        binBounds : (low, high) bin window selecting bins for the fit
        rateLook : rate at which the fitted bin position is evaluated
        binBaseline : reference bin; binMean/binBaseline gives the gain shift
            (computed but currently not the quantity written to file)
    """
    # Temporary quirks
    #   - baseline dataset is hard coded
    #   - loads extraneous datasets
    # 9/24/2013, changed delimiter to ','
    
    ## READS IN ALL THE FILES AND DEFINES DATASET GROUPS - need to trim down
    # Create list of datasets
    (filenameList, fullfilenameList) = \
                   GenerateDefaultDatasetFilenameList2ndSet(basepath)    
    # LISTS OF DATASETS GROUPS
    (datasetGroups, datasetGroupsIndices,datasetGroupsWidth, datasetRawWidth, datasetMaterial) = \
                    GenerateDefaultDatasetGroupList2ndSet(filenameList)
    # list of good/bad detectors
    (goodDetectorsList, badDetectorsList, goodDetectorsMask, badDetectorsMask) = \
                        GenerateDefaultDetectorList2ndSet()
    # Load zspec data
    dat = ReadZspec(fullfilenameList)
    # Load summary data
    infoFilename = os.path.join(basepath,'DatasetSummary2ndSet.txt')
    (datasetDescription, datasetAcquisitionTime, \
        datasetTime, datasetTimeNum, datasetTimeStr) = \
        GetDatasetInformation(infoFilename, filenameList)    

    # Get the indices for the calibration datasets
    # NOTE(review): the 'CC' lookup below is immediately overwritten by the
    # 'IB' lookup, so the 'IB' group is what is actually used -- confirm
    # which group is intended.
    CCindices = datasetGroupsIndices['CC'];
    CCindices = datasetGroupsIndices['IB'];
    
    # Measure for calculating the gain
    #  Where the rate pulse at 60 Hz is equal to "rateLook";  the gain calibration
    # tries to shift all the spectra so that this point of the spectra is at bin 100
    # DIM: number of datasets by number of detectors
    binMeanCC = np.zeros((len(CCindices), dat[0].shape[1]))
    
    # gain shift across datasets
    gainShift = np.zeros((len(CCindices), dat[0].shape[1]))
    
    binArray = np.arange(dat[0].shape[0])
    
    for (cc, datasetIndex) in enumerate(CCindices):# cycle through the datasets
        for ch in xrange(dat[datasetIndex].shape[1]):  # cycle through the detector numbers
            # include only the bins between the binBounds and count rate between the rateRange
            cutt = (binArray > binBounds[0]) & (binArray < binBounds[1])
            cutt = cutt & (dat[datasetIndex][:,ch]/datasetAcquisitionTime[datasetIndex] > rateRange[0]) & \
                (dat[datasetIndex][:,ch]/datasetAcquisitionTime[datasetIndex] < rateRange[1])
            # if there are any bins in this range, then calculate the binMean
            if any(cutt):
                # fit a line to log(rate) vs bin, i.e. an exponential fit
                temp = np.polyfit(binArray[cutt], np.log(dat[datasetIndex][cutt,ch]/datasetAcquisitionTime[datasetIndex]),1)
                pfit0 = [temp[0], np.exp(temp[1])]
                # bin at which the fitted exponential equals rateLook
                binMean = (1.0/pfit0[0]) * np.log(rateLook/pfit0[1])
            else:
                binMean = 0.0
            binMeanCC[cc, ch] = binMean
            # Print summary
        print(filenameList[datasetIndex], binMeanCC[cc,:])
    gainShift = binMeanCC / binBaseline

    # Build the array that will be written to file, containing time string,
    # epoch time and gain correction values
    temp = np.empty((len(CCindices), 1 + dat[0].shape[1]), dtype = '|S20')
    temp[:,0] = datasetTimeNum[CCindices]
    temp[:,1:] = binMeanCC
    #temp[:,1:] = gainShift
    
    fullfilename = os.path.join(basepath, filename)
    # context manager guarantees the file is closed even if writerows fails
    with open(fullfilename, 'wb') as fid:
        csvWriterObj = csv.writer(fid, delimiter = ',')
        csvWriterObj.writerows(temp)

# DATA CORRECTION
def CreateGainCorrectionFileOLD(basepath, filename, countRange, binBounds):
    """ Makes a file containing the epoch time and the gain calibration number.

        Legacy version: instead of an exponential fit it uses the count-weighted
        mean bin over a bin/count window, and writes tab-delimited output.

        Parameters
        ----------
        basepath : directory holding the zspec datasets
        filename : output filename, written inside basepath
        countRange : (low, high) count window selecting bins for the mean
        binBounds : (low, high) bin window selecting bins for the mean
    """    
    # Temporary quirks
    #   - baseline dataset is hard coded
    #   - loads extraneous datasets
    
    ## READS IN ALL THE FILES AND DEFINES DATASET GROUPS - need to trim down
    # Create list of datasets
    (filenameList, fullfilenameList) = \
                   GenerateDefaultDatasetFilenameList(basepath)    
    # LISTS OF DATASETS GROUPS
    (datasetGroups, datasetGroupsIndices) = \
                    GenerateDefaultDatasetGroupList(filenameList)
    # list of good/bad detectors
    (goodDetectorsList, badDetectorsList) = \
                        GenerateDefaultDetectorList()
    # Load zspec data
    dat = ReadZspec(fullfilenameList)
    # Load summary data
    infoFilename = basepath + '\\' + 'datasetSummaryOLD.txt'
    (datasetDescription, datasetAcquisitionTime, \
     datasetTime, datasetTimeNum, datasetTimeStr) = \
     GetDatasetInformation(infoFilename, filenameList)    
    ## read in the information file

    ## calculate the gain correction parameters
    # Count range for calculating bin
    CCindices = datasetGroupsIndices['CC'];
    
    # measure for calculating the gain
    # DIM: number of calibration datasets x number of detectors
    binMeanCC = np.zeros((len(CCindices), dat[0].shape[1]))
    # gain shift across datasets, relative to the first calibration dataset
    gainShift = np.zeros((len(CCindices), dat[0].shape[1]))
    
    # bin index replicated per detector column so the cuts can be vectorized
    binArray = np.matlib.repmat(np.arange(0,dat[0].shape[0]),dat[0].shape[1],1).T
    
    for cc in range(len(datasetGroupsIndices['CC'])):
        ii = CCindices[cc]
        # keep only bins inside binBounds whose counts fall inside countRange
        cutt = (binArray > binBounds[0]) & (binArray < binBounds[1]) & (dat[ii] > countRange[0]) & (dat[ii] < countRange[1])
        # count-weighted mean bin per detector
        # NOTE(review): if no bins pass the cut for a detector this is 0/0 -> NaN
        binMeanCC[cc,:] = (binArray * dat[ii] * cutt).sum(axis = 0) / (dat[ii] * cutt).sum(axis = 0)
        # shift expressed relative to the first calibration dataset (row 0)
        gainShift[cc,:] = binMeanCC[0,:] / binMeanCC[cc,:]

    # Build the array that will be written to file, containing time string, epoch time and gain correction values
    temp = np.empty((len(CCindices), 1 + dat[0].shape[1]), dtype = '|S20')
    temp[:,0] = datasetTimeNum[CCindices]
    temp[:,1:] = binMeanCC
    #temp[:,1:] = gainShift
    
    fullfilename = basepath + '\\' + filename
    # NOTE: tab-delimited here, unlike the ','-delimited newer versions
    fid = open(fullfilename, 'wb')
    csvWriterObj = csv.writer(fid, delimiter = '\t')
    csvWriterObj.writerows(temp)
    fid.close()

def LoadGainCalibration(fullfilename):
    """ Load a gain-calibration csv written by CreateGainCorrectionFile*.

        Each row is: epoch time, then one fitted-bin/gain value per detector,
        comma delimited.  All rows are read (the previous version hard-coded
        10 rows as a temporary workaround); blank lines are skipped.

        Parameters
        ----------
        fullfilename : path to the calibration csv file

        Returns
        -------
        (timeNum, gainShift) : float arrays -- timeNum has one entry per
            dataset; gainShift is (number of datasets) x (number of detectors)
    """
    datasetTimeNum = list()
    gainShift = list()
    # context manager closes the handle even on error; text mode lets the
    # csv module handle line endings
    with open(fullfilename, 'r') as fid:
        csvReaderObj = csv.reader(fid, delimiter = ',')
        for lineIn in csvReaderObj:
            if not lineIn:  # skip blank lines
                continue
            lineIn = np.array(lineIn)
            datasetTimeNum.append(lineIn[0])
            gainShift.append(lineIn[1:])
    return (np.array(datasetTimeNum).astype(float), np.array(gainShift).astype(float))

def ExtrapolateGain(calTimeNum, gainShift, datasetTimeNumList):
    """ Interpolate/extrapolate the gain shift of every detector onto the
        requested dataset times.

        Parameters
        ----------
        calTimeNum : calibration times, one per calibration dataset
        gainShift : (number of calibrations) x (number of detectors) array
        datasetTimeNumList : times at which the gain is wanted

        Returns
        -------
        (number of requested times) x (number of detectors) array
    """
    numTimes = len(datasetTimeNumList)
    numDetectors = gainShift.shape[1]
    extrapolated = np.zeros((numTimes, numDetectors))
    # interpolate each detector column independently along the time axis
    for col in range(numDetectors):
        extrapolated[:, col] = np.interp(datasetTimeNumList, calTimeNum, gainShift[:, col])
    return(extrapolated)

def SaveSpectra(dat, fullfilename):
    """ Write an array of spectra to disk as csv.

        Layout: a header row ',D1,...,Dn', then one row per bin with the bin
        index followed by that bin's count for every detector.

        Parameters
        ----------
        dat : 2-d array, (number of bins) x (number of detectors)
        fullfilename : output path
    """
    numBins = dat.shape[0]
    numDetectors = dat.shape[1]

    # context manager closes the file even if a write fails; text mode
    # avoids the fixed-width numpy string-array header of the old version
    with open(fullfilename, 'w') as fid:
        csvWriter = csv.writer(fid, delimiter = ',')
        # header: blank corner cell, then one label per detector (D1..Dn)
        csvWriter.writerow([''] + ['D%d' % (i + 1) for i in range(numDetectors)])
        # data rows: bin index followed by the per-detector counts
        for binIndex in np.arange(numBins):
            csvWriter.writerow(np.hstack((binIndex, dat[binIndex,:])))

# need the datasetSummary file? 
#def CalculateStatsAll(filenameList, statsList):
    #1+1
    ## cycle through datasets
        ## load all files for a dataset
            ##  cycle through different binning structure
                ## rebin the spectra
                ## calculate the stat values
                ## save to file


def CheckGain(dat, acqTime60Hz, rateRange, rateLook, binBaseline, threshold):
    """ Check each detector's gain shift against a threshold.

        For every detector, fit a line to log(rate) vs bin (i.e. an
        exponential) over fixed bin bounds and the supplied rate range, find
        the bin where the fitted rate equals rateLook, and express it
        relative to binBaseline.  Prints a per-detector summary table.

        Parameters
        ----------
        dat : 2-d spectra array, (number of bins) x (number of detectors)
        acqTime60Hz : acquisition time used to convert counts to rates
        rateRange : (low, high) rate window selecting bins for the fit
        rateLook : rate at which the fitted bin position is evaluated
        binBaseline : reference bin; the shift is binMean / binBaseline
        threshold : max allowed |1 - shift| (string or float)

        Returns
        -------
        (gainShift, score, binMeanAll) per detector; score is True where the
        shift is within threshold.

        8/28/2013, John Kwong
    """
    threshold = float(threshold)        # accept string input
    acqTime = float(acqTime60Hz)
    binBounds = [20, 200]               # fixed fit window in bins
    binArray = np.arange(dat.shape[0])
    numDetectors = dat.shape[1]
    binMeanAll = np.zeros(numDetectors)

    for det in np.arange(numDetectors):
        rate = dat[:, det] / acqTime
        keep = (binArray > binBounds[0]) & (binArray < binBounds[1]) \
             & (rate > rateRange[0]) & (rate < rateRange[1])
        if any(keep):
            # straight-line fit of log(rate) vs bin == exponential fit
            slope, logAmp = np.polyfit(binArray[keep], np.log(rate[keep]), 1)
            # bin at which the fitted exponential equals rateLook
            binMeanAll[det] = (1.0 / slope) * np.log(rateLook / np.exp(logAmp))
        else:
            binMeanAll[det] = 0.0

    gainShift = binMeanAll / binBaseline
    score = np.abs(1 - gainShift) <= threshold

    # display results
    print("Target value at rate %3.3f is %3.3f" % (rateLook, binBaseline))
    print("Gain Shift Threshold =  %3.3f" % threshold)

    print("%s\t%s\t%s" % ("Det#", "Shift", "Good?"))
    for (index, gs) in enumerate(gainShift):
        print("%d\t%3.3f\t%r" % (index + 1, gs, score[index]))

    return(gainShift, score, binMeanAll)


def CalculateStats(dat, acqTimeMC, spectrumBins, gainCorrectionMat, lowerBinThreshold, pulseRate):
    """ Calculate Stats given the specta, bins, statslist and gain correciton matrix
        Added count60Hz, basically counts normalized to counts
        3/14/2014, remove outputDir argument. wasn't being used.
        4/6/2014, Added 3rd, 4th moments, kurtosis and skewness.

        Parameters
        ----------
        dat : list of 2-d spectra arrays, one per dataset,
            (number of bins) x (number of detectors)
        acqTimeMC : per-dataset acquisition times
        spectrumBins : bin axis shared by all spectra
        gainCorrectionMat : (number of datasets) x (number of detectors)
            gain values; used both to shift the lower-bin cutoff and to
            scale the gain-corrected ('_g'/'_g1') statistics
        lowerBinThreshold : bins below this (after gain correction) are zeroed
        pulseRate : per-dataset pulse rates, used for per-pulse normalization

        Returns
        -------
        dict mapping each stat name to a
        (number of datasets) x (number of detectors) array
    """
    # initialize the array that holds the stats
    baselineIndex = 0
    baselineDetectorCh = 70
    stats = dict()
    statsList = ('binSum', \
                 'binMean', 'binSTD', 'binSTD_binMean',\
                'q_10', 'q_25', 'q_50', 'q_75', 'q_90', \
                'q_range', 'q_range_ratio', 'binMeanSq', 'binMean_q50', \
                 \
                'binMean_g', 'binSTD_g',\
                'q_10_g', 'q_25_g', 'q_50_g', 'q_75_g', 'q_90_g', \
                'q_range_g', 'binMeanSq_g', 'binMean_q50_g', \
                 \
                 'binMean_g1', 'binSTD_g1',\
                'q_10_g1', 'q_25_g1', 'q_50_g1', 'q_75_g1', 'q_90_g1', \
                'q_range_g1', 'binMeanSq_g1', 'binMean_q50_g1', \
                 \
                'multibin_0_10', 'multibin_10_256', 'multibin_0_20', 'multibin_20_256', \
                'multibin_10_ratio', 'multibin_20_ratio', \
                 \
                'multibin_0_10_g', 'multibin_10_256_g', 'multibin_0_20_g', 'multibin_20_256_g', \
                'multibin_10_ratio_g', 'multibin_20_ratio_g', \
                 \
                'multibin_0_10_g1', 'multibin_10_256_g1', 'multibin_0_20_g1', 'multibin_20_256_g1', \
                'multibin_10_ratio_g1', 'multibin_20_ratio_g1', \
                'count', 'countPerPulse', 'countTotal', \
                'binMoment3', 'binMoment4', 'binMoment3_g', 'binMoment4_g', 'binSkew', 'binKur')                
                
    # Initialize array: one (datasets x detectors) array per stat
    for i in range(0,len(statsList)):
        stats[statsList[i]] = np.zeros((len(dat), dat[0].shape[1]))
    # useful matrix for vectorizing calculations: bin axis replicated per detector
    binArray = np.matlib.repmat(spectrumBins, dat[0].shape[1],1).T
    
    # CALCULATE THE STATISTICS
    for ii in np.arange(len(dat)): # cycle through datasets
#        print ii
        # normalize to a single pulse
        datTemp = dat[ii].astype(float)/acqTimeMC[ii]/pulseRate[ii]
        
        # Remove the lower bins by setting it to zero
        # this is approximate because it assumes gain shift isn't too much
#        datTemp[0:lowerBinThreshold,:] = 0.0        

        # This is more accurate: shift the cutoff per detector by its gain
        # (debug printout for the first dataset's baseline detector)
        if ii == 0:
            print np.round(lowerBinThreshold/gainCorrectionMat[ii,baselineDetectorCh])
            print datTemp[:,baselineDetectorCh]        
        
        # cycle through detectors
        for jj in xrange(datTemp.shape[1]):
            # NOTE(review): round() yields a float here; float slice bounds
            # rely on older numpy behavior -- verify on current numpy
            lowerBinThresholdCorrected = round(lowerBinThreshold/gainCorrectionMat[ii,jj])
            datTemp[0:lowerBinThresholdCorrected,jj] = 0.0 # set lower bins to zero
            
        if ii == 0:
            print np.round(lowerBinThreshold/gainCorrectionMat[ii,baselineDetectorCh])
            print datTemp[:,baselineDetectorCh]
            
        # Counts per pulse ('count' and 'countPerPulse' are the same quantity)
        stats['count'][ii,:] = datTemp.sum(axis = 0)
        stats['countPerPulse'][ii,:] = datTemp.sum(axis = 0)
        stats['countTotal'][ii,:] = datTemp.sum(axis = 0) * acqTimeMC[ii] * pulseRate[ii] # multiple by time and pulse rate
    
        # QUANTILES (nearest-bin quantiles from the normalized cumulative sum)
        datTempCumSum = datTemp.cumsum(axis = 0) # cumulative sum
        # NOTE(review): the 256 below hard-codes the number of bins
        datTempCumSum = datTempCumSum / np.matlib.repmat(datTempCumSum[-1,:], 256, 1) # normalize the cumulative sum
        qIntervals = [10., 25., 50., 75., 90.]
        for j in range(len(qIntervals)):
            statName = 'q_%02d' % qIntervals[j]# apparently needs a temporary variable
            stats[statName][ii,:] = spectrumBins[np.argmin(abs(datTempCumSum - (qIntervals[j]/100)), axis = 0)]
        stats['q_range'][ii,:] = stats['q_75'][ii,:] - stats['q_25'][ii,:]
        stats['q_range_ratio'][ii,:] = stats['q_range'][ii,:] / stats['q_50'][ii,:]
    
        # calculate the mean and spread (count-weighted over bins)
        stats['binMean'][ii,:] = (binArray * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        stats['binMeanSq'][ii,:] = ((binArray**2) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        stats['binSum'][ii,:] = datTemp.sum(axis = 0).astype(float)
    
        stats['binMean_q50'][ii,:] = stats['binMean'][ii,:] - stats['q_50'][ii,:]
    
        temp = np.matlib.repmat(stats['binMean'][ii,:], datTemp.shape[0],1)  # array of means, 256 x number detectors
    
        stats['binSTD'][ii,:] = np.sqrt(  ( ((binArray - temp)**2) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)  )
        stats['binSTD_binMean'][ii,:] = stats['binSTD'][ii,:] / stats['binMean'][ii,:]
        
        # higher central moments (count-weighted)
        stats['binMoment4'][ii,:] = ( ((binArray - temp)**4) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        stats['binMoment3'][ii,:] = ( ((binArray - temp)**3) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        
        # skewness and kurtosis
        # this shoult be gain invariant as the both the numerator and denominator are linear with gain
        stats['binSkew'][ii,:] = stats['binMoment3'][ii,:] / stats['binSTD'][ii,:] **3.0
        stats['binKur'][ii,:] = stats['binMoment4'][ii,:] / stats['binSTD'][ii,:] **4.0
        
        # Correction across time
        g = gainCorrectionMat[ii,:]
        # Correct across detectors; line up gains to the baseline director, first CC datasets
        # NOTE(review): g1 is identical to g (both read gainCorrectionMat[ii,:]),
        # so the '_g' and '_g1' stats are currently the same -- confirm whether
        # g1 was meant to use the baseline row (see commented code further down)
        g1 = gainCorrectionMat[ii,:]
        
        stats['q_10_g'][ii,:] = stats['q_10'][ii,:] * g
        stats['q_25_g'][ii,:] = stats['q_25'][ii,:] * g
        stats['q_50_g'][ii,:] = stats['q_50'][ii,:] * g
        stats['q_75_g'][ii,:] = stats['q_75'][ii,:] * g
        stats['q_90_g'][ii,:] = stats['q_90'][ii,:] * g
        stats['q_range_g'][ii,:] = stats['q_range'][ii,:] * g
        stats['binMean_g'][ii,:] = stats['binMean'][ii,:] * g
        stats['binMeanSq_g'][ii,:] = stats['binMeanSq'][ii,:] * g
        stats['binMean_q50_g'][ii,:] = stats['binMean_q50'][ii,:] * g
        stats['binSTD_g'][ii,:] = stats['binSTD'][ii,:] * g
        stats['binMoment3_g'][ii,:] = stats['binMoment3'][ii,:] * g
        stats['binMoment4_g'][ii,:] = stats['binMoment4'][ii,:] * g
    
        stats['q_10_g1'][ii,:] = stats['q_10'][ii,:] * g1
        stats['q_25_g1'][ii,:] = stats['q_25'][ii,:] * g1
        stats['q_50_g1'][ii,:] = stats['q_50'][ii,:] * g1
        stats['q_75_g1'][ii,:] = stats['q_75'][ii,:] * g1
        stats['q_90_g1'][ii,:] = stats['q_90'][ii,:] * g1
        stats['q_range_g1'][ii,:] = stats['q_range'][ii,:] * g1
        stats['binMean_g1'][ii,:] = stats['binMean'][ii,:] * g1
        stats['binMeanSq_g1'][ii,:] = stats['binMeanSq'][ii,:] * g1
        stats['binMean_q50_g1'][ii,:] = stats['binMean_q50'][ii,:] * g1
        stats['binSTD_g1'][ii,:] = stats['binSTD'][ii,:] * g1
    
        # MULTIBIN PARAMETERS
        # Calculate high-low ratios - no rebinning so no correction
        
        # find the bin that is just above 10 (and 20) on the spectrum axis
        highlowIndex10 = np.argmin( abs(spectrumBins - 10) )
        highlowIndex20 = np.argmin( abs(spectrumBins - 20) )
        
        stats['multibin_0_10'][ii,:] = datTemp[0:highlowIndex10,:].sum(axis=0)  # bins 0 to 9, so first ten bins
        stats['multibin_10_256'][ii,:] = datTemp[highlowIndex10:,:].sum(axis=0)
        stats['multibin_0_20'][ii,:] = datTemp[0:highlowIndex20,:].sum(axis=0)
        stats['multibin_20_256'][ii,:] = datTemp[highlowIndex20:,:].sum(axis=0)
    
        stats['multibin_10_ratio'][ii,:]  = stats['multibin_0_10'][ii,:]  /stats['multibin_10_256'][ii,:]
        stats['multibin_20_ratio'][ii,:]  = stats['multibin_0_20'][ii,:]  /stats['multibin_20_256'][ii,:]
    
        # calculate the histogram split location with correction for gain shift in time
#         binCorrection = gainCorrectionMat[ii,:]/gainCorrectionMat[baselineIndex,:]
        binCorrection = 1/gainCorrectionMat[ii,:]
        
        # bin 10: per-detector gain-shifted split threshold
        thresholdMat = np.round(np.matlib.repmat(binCorrection, 256,1)*9.0)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
        
        stats['multibin_0_10_g'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_10_256_g'][ii,:] = (datTemp * cutt2).sum(axis=0)
    
        # bin 20
        thresholdMat = np.round(np.matlib.repmat(binCorrection, 256,1)*19.0)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
        stats['multibin_0_20_g'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_20_256_g'][ii,:] = (datTemp * cutt2).sum(axis=0)
        stats['multibin_10_ratio_g'][ii,:] = stats['multibin_0_10_g'][ii,:] / stats['multibin_10_256_g'][ii,:]
        stats['multibin_20_ratio_g'][ii,:] = stats['multibin_0_20_g'][ii,:] / stats['multibin_20_256_g'][ii,:]
    
        # CORRECT ACROSS DETECTORS using the first CC dataset to correct
        # binCorrection = gainCorrectionMat[ii,:]/gainCorrectionMat[baselineIndex,:] # current/baseline
#        temp = gainCorrectionMat[baselineIndex,:] / gainCorrectionMat[baselineIndex,baselineDetector]
#        binCorrection = binCorrection * temp
        # NOTE(review): identical to the '_g' correction above -- the intended
        # baseline-detector normalization is the commented code just above
        binCorrection = 1/gainCorrectionMat[ii,:]

        # bin 10
        thresholdMat = np.round(np.matlib.repmat(binCorrection, len(spectrumBins),1)*9.0)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
        
        stats['multibin_0_10_g1'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_10_256_g1'][ii,:] = (datTemp * cutt2).sum(axis=0)

        # bin 20
        thresholdMat = np.round(np.matlib.repmat(binCorrection, len(spectrumBins),1)*19.0)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
    
        stats['multibin_0_20_g1'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_20_256_g1'][ii,:] = (datTemp * cutt2).sum(axis=0)
    
        stats['multibin_10_ratio_g1'][ii,:] = stats['multibin_0_10_g1'][ii,:] / stats['multibin_10_256_g1'][ii,:]
        stats['multibin_20_ratio_g1'][ii,:] = stats['multibin_0_20_g1'][ii,:] / stats['multibin_20_256_g1'][ii,:]
    return stats

def CalculateStatsInterp(datInput, acqTimeMC, spectrumBinsInput, gainCorrectionMat, lowerBinThreshold, pulseRate):
    """ Calculate Stats given the specta, bins, statslist and gain correciton matrix
        Added count60Hz, basically counts normalized to counts
        3/14/2014, remove outputDir argument. wasn't being used.
        4/6/2014, Added 3rd, 4th moments, kurtosis and skewness.

        Each spectrum is first interpolated onto a 10x finer bin grid
        (0.1-bin steps) so the gain-corrected bin thresholds below can be
        applied with sub-bin resolution.

        datInput          -- list of (bins x detectors) count arrays, one per dataset
        acqTimeMC         -- per-dataset acquisition times
        spectrumBinsInput -- coarse bin centers of the input spectra
        gainCorrectionMat -- (datasets x detectors) gain correction factors
        lowerBinThreshold -- lowest coarse bin to keep; lower bins are zeroed
        pulseRate         -- per-dataset pulse rates

        Returns a dict mapping stat name -> (datasets x detectors) array.
    """
    baselineIndex = 0
    baselineDetectorCh = 70  # detector channel used only for the debug printouts below
    
    # threshold was specified in coarse-bin units; scale it to the 10x finer grid
    lowerBinThreshold = lowerBinThreshold * 10.0
    
    # new spectrum bins
    spectrumBins = np.arange(spectrumBinsInput[0], spectrumBinsInput[-1], 0.1)
    # interpolate every dataset onto the fine grid; /10.0 keeps total counts comparable
    dat = []
    for i in xrange(len(datInput)):
        temp = np.zeros((len(spectrumBins), 136))  # 136 detector channels (hard-coded)
        for ch in xrange(datInput[i].shape[1]):
            yy = np.interp(spectrumBins, spectrumBinsInput, datInput[i][:,ch]) / 10.0
            temp[:,ch] = copy.copy(yy)
        dat.append(temp)
    
    # initialize the array that holds the stats
    stats = dict()
    statsList = ('binSum', \
                 'binMean', 'binSTD', 'binSTD_binMean',\
                'q_10', 'q_25', 'q_50', 'q_75', 'q_90', \
                'q_range', 'q_range_ratio', 'binMeanSq', 'binMean_q50', \
                 \
                'binMean_g', 'binSTD_g',\
                'q_10_g', 'q_25_g', 'q_50_g', 'q_75_g', 'q_90_g', \
                'q_range_g', 'binMeanSq_g', 'binMean_q50_g', \
                 \
                 'binMean_g1', 'binSTD_g1',\
                'q_10_g1', 'q_25_g1', 'q_50_g1', 'q_75_g1', 'q_90_g1', \
                'q_range_g1', 'binMeanSq_g1', 'binMean_q50_g1', \
                 \
                'multibin_0_10', 'multibin_10_256', 'multibin_0_20', 'multibin_20_256', \
                'multibin_10_ratio', 'multibin_20_ratio', \
                 \
                'multibin_0_10_g', 'multibin_10_256_g', 'multibin_0_20_g', 'multibin_20_256_g', \
                'multibin_10_ratio_g', 'multibin_20_ratio_g', \
                 \
                'multibin_0_10_g1', 'multibin_10_256_g1', 'multibin_0_20_g1', 'multibin_20_256_g1', \
                'multibin_10_ratio_g1', 'multibin_20_ratio_g1', \
                'count', 'countPerPulse', 'countTotal', \
                'binMoment3', 'binMoment4', 'binMoment3_g', 'binMoment4_g', 'binSkew', 'binKur')
                
    # Initialize array: one (datasets x detectors) matrix per stat name
    for i in range(0,len(statsList)):
        stats[statsList[i]] = np.zeros((len(dat), dat[0].shape[1]))
        
    # useful matrix for vectorizing calculations    
    binArray = np.matlib.repmat(spectrumBins, dat[0].shape[1],1).T
    
    # CALCULATE THE STATISTICS
    for ii in xrange(len(dat)): # cycle through datasets
#        print ii
        # normalize to a single pulse
        datTemp = dat[ii].astype(float)/acqTimeMC[ii]/pulseRate[ii]
        
        # Remove the lower bins by setting it to zero
        # this is approximate because it assumes gain shift isn't too much
#        datTemp[0:lowerBinThreshold,:] = 0.0        

        # This is more accurate
        if ii == 0:
            print np.round(lowerBinThreshold/gainCorrectionMat[ii,baselineDetectorCh])
            print datTemp[:,baselineDetectorCh]        
        
        # cycle through detectors: per-detector gain-corrected low-bin cutoff
        for jj in xrange(datTemp.shape[1]):
            lowerBinThresholdCorrected = round(lowerBinThreshold/gainCorrectionMat[ii,jj])
            datTemp[0:lowerBinThresholdCorrected,jj] = 0.0 # set lower bins to zero
            
        if ii == 0:
            print np.round(lowerBinThreshold/gainCorrectionMat[ii,baselineDetectorCh])
            print datTemp[:,baselineDetectorCh]
            
        # Counts per pulse
        # NOTE(review): 'count' and 'countPerPulse' are computed identically here
        stats['count'][ii,:] = datTemp.sum(axis = 0)
        stats['countPerPulse'][ii,:] = datTemp.sum(axis = 0)
        stats['countTotal'][ii,:] = datTemp.sum(axis = 0) * acqTimeMC[ii] * pulseRate[ii] # multiple by time and pulse rate
    
        # QUANTILES
        datTempCumSum = datTemp.cumsum(axis = 0) # cumulative sum
        datTempCumSum = datTempCumSum / np.matlib.repmat(datTempCumSum[-1,:], datTemp.shape[0], 1) # normalize the cumulative sum
        qIntervals = [10., 25., 50., 75., 90.]
        for j in range(len(qIntervals)):
            statName = 'q_%02d' % qIntervals[j]# apparently needs a temporary variable
            # quantile = bin whose normalized cumulative sum is closest to the target fraction
            stats[statName][ii,:] = spectrumBins[np.argmin(abs(datTempCumSum - (qIntervals[j]/100)), axis = 0)]
        stats['q_range'][ii,:] = stats['q_75'][ii,:] - stats['q_25'][ii,:]
        stats['q_range_ratio'][ii,:] = stats['q_range'][ii,:] / stats['q_50'][ii,:]
    
        # calculate the mean and spread (count-weighted over bin centers)
        stats['binMean'][ii,:] = (binArray * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        stats['binMeanSq'][ii,:] = ((binArray**2) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        stats['binSum'][ii,:] = datTemp.sum(axis = 0).astype(float)
    
        stats['binMean_q50'][ii,:] = stats['binMean'][ii,:] - stats['q_50'][ii,:]
    
        # array of means, 256 x number detectors
        temp = np.matlib.repmat(stats['binMean'][ii,:], datTemp.shape[0],1)
    
        stats['binSTD'][ii,:] = np.sqrt(  ( ((binArray - temp)**2) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)  )
        stats['binSTD_binMean'][ii,:] = stats['binSTD'][ii,:] / stats['binMean'][ii,:]
        
        # higher moments (central, count-weighted)
        stats['binMoment4'][ii,:] = ( ((binArray - temp)**4) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        stats['binMoment3'][ii,:] = ( ((binArray - temp)**3) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        
        # skewness and kurtosis
        # this shoult be gain invariant as the both the numerator and denominator are linear with gain
        stats['binSkew'][ii,:] = stats['binMoment3'][ii,:] / stats['binSTD'][ii,:] **3.0
        stats['binKur'][ii,:] = stats['binMoment4'][ii,:] / stats['binSTD'][ii,:] **4.0
        
        # Correction across time
        g = gainCorrectionMat[ii,:]
        # Correct across detectors; line up gains to the baseline director, first CC datasets
        # NOTE(review): g and g1 are currently identical; the baseline-relative
        # correction used in CalculateStatsOLD appears to have been disabled here.
        g1 = gainCorrectionMat[ii,:]
        
        stats['q_10_g'][ii,:] = stats['q_10'][ii,:] * g
        stats['q_25_g'][ii,:] = stats['q_25'][ii,:] * g
        stats['q_50_g'][ii,:] = stats['q_50'][ii,:] * g
        stats['q_75_g'][ii,:] = stats['q_75'][ii,:] * g
        stats['q_90_g'][ii,:] = stats['q_90'][ii,:] * g
        stats['q_range_g'][ii,:] = stats['q_range'][ii,:] * g
        stats['binMean_g'][ii,:] = stats['binMean'][ii,:] * g
        stats['binMeanSq_g'][ii,:] = stats['binMeanSq'][ii,:] * g
        stats['binMean_q50_g'][ii,:] = stats['binMean_q50'][ii,:] * g
        stats['binSTD_g'][ii,:] = stats['binSTD'][ii,:] * g
        stats['binMoment3_g'][ii,:] = stats['binMoment3'][ii,:] * g
        stats['binMoment4_g'][ii,:] = stats['binMoment4'][ii,:] * g
    
        stats['q_10_g1'][ii,:] = stats['q_10'][ii,:] * g1
        stats['q_25_g1'][ii,:] = stats['q_25'][ii,:] * g1
        stats['q_50_g1'][ii,:] = stats['q_50'][ii,:] * g1
        stats['q_75_g1'][ii,:] = stats['q_75'][ii,:] * g1
        stats['q_90_g1'][ii,:] = stats['q_90'][ii,:] * g1
        stats['q_range_g1'][ii,:] = stats['q_range'][ii,:] * g1
        stats['binMean_g1'][ii,:] = stats['binMean'][ii,:] * g1
        stats['binMeanSq_g1'][ii,:] = stats['binMeanSq'][ii,:] * g1
        stats['binMean_q50_g1'][ii,:] = stats['binMean_q50'][ii,:] * g1
        stats['binSTD_g1'][ii,:] = stats['binSTD'][ii,:] * g1
    
        # MULTIBIN PARAMETERS
        # Calculate high-low ratios - no rebinning so no correction
        
        # find the bin that is just above 10
        highlowIndex10 = np.argmin( abs(spectrumBins - 10.0) )
        highlowIndex20 = np.argmin( abs(spectrumBins - 20.0) )
        
        stats['multibin_0_10'][ii,:] = datTemp[0:highlowIndex10,:].sum(axis=0)  # bins 0 to 9, so first ten bins
        stats['multibin_10_256'][ii,:] = datTemp[highlowIndex10:,:].sum(axis=0)
        stats['multibin_0_20'][ii,:] = datTemp[0:highlowIndex20,:].sum(axis=0)
        stats['multibin_20_256'][ii,:] = datTemp[highlowIndex20:,:].sum(axis=0)
    
        stats['multibin_10_ratio'][ii,:]  = stats['multibin_0_10'][ii,:]  /stats['multibin_10_256'][ii,:]
        stats['multibin_20_ratio'][ii,:]  = stats['multibin_0_20'][ii,:]  /stats['multibin_20_256'][ii,:]
    
        # calculate the histogram split location with correction for gain shift in time
#         binCorrection = gainCorrectionMat[ii,:]/gainCorrectionMat[baselineIndex,:]
        binCorrection = 1/gainCorrectionMat[ii,:]
        
        # bin 10: per-detector split threshold on the fine grid
        thresholdMat = np.round(np.matlib.repmat(binCorrection, datTemp.shape[0],1)*9.9)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
        
        stats['multibin_0_10_g'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_10_256_g'][ii,:] = (datTemp * cutt2).sum(axis=0)
    
        # bin 20
        thresholdMat = np.round(np.matlib.repmat(binCorrection,  datTemp.shape[0],1)*19.9)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
        stats['multibin_0_20_g'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_20_256_g'][ii,:] = (datTemp * cutt2).sum(axis=0)
        stats['multibin_10_ratio_g'][ii,:] = stats['multibin_0_10_g'][ii,:] / stats['multibin_10_256_g'][ii,:]
        stats['multibin_20_ratio_g'][ii,:] = stats['multibin_0_20_g'][ii,:] / stats['multibin_20_256_g'][ii,:]
    
        # CORRECT ACROSS DETECTORS using the first CC dataset to correct
        # binCorrection = gainCorrectionMat[ii,:]/gainCorrectionMat[baselineIndex,:] # current/baseline
#        temp = gainCorrectionMat[baselineIndex,:] / gainCorrectionMat[baselineIndex,baselineDetector]
#        binCorrection = binCorrection * temp
        # NOTE(review): same binCorrection as above, so the _g1 multibin stats
        # currently duplicate the _g ones.
        binCorrection = 1/gainCorrectionMat[ii,:]

        # bin 10
        thresholdMat = np.round(np.matlib.repmat(binCorrection, len(spectrumBins),1)*9.9)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
        
        stats['multibin_0_10_g1'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_10_256_g1'][ii,:] = (datTemp * cutt2).sum(axis=0)

        # bin 20
        thresholdMat = np.round(np.matlib.repmat(binCorrection, len(spectrumBins),1)*19.9)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
    
        stats['multibin_0_20_g1'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_20_256_g1'][ii,:] = (datTemp * cutt2).sum(axis=0)
    
        stats['multibin_10_ratio_g1'][ii,:] = stats['multibin_0_10_g1'][ii,:] / stats['multibin_10_256_g1'][ii,:]
        stats['multibin_20_ratio_g1'][ii,:] = stats['multibin_0_20_g1'][ii,:] / stats['multibin_20_256_g1'][ii,:]
    return stats



def CalculateStatsOLD(dat, acqTimeMC, spectrumBins, gainCorrectionMat, outputDir, lowerBinThreshold, baselineDetector):
    """ Calculate Stats given the specta, bins, statslist and gain correciton matrix

        Older variant kept for reference: works on the raw (non-interpolated)
        256-bin spectra and includes a 'transmission' stat.

        dat               -- list of (bins x detectors) count arrays, one per dataset
        acqTimeMC         -- per-dataset acquisition times
        spectrumBins      -- bin centers (256 bins assumed in several places)
        gainCorrectionMat -- (datasets x detectors) gain correction factors
        outputDir         -- unused here (kept for call compatibility)
        lowerBinThreshold -- bins below this index are zeroed before stats
        baselineDetector  -- detector used as the across-detector gain reference

        Returns a dict mapping stat name -> (datasets x detectors) array.
    """
    # initialize the array that holds the stats
    baselineIndex = 0  # dataset row used as the across-time gain reference
    stats = dict()
    statsList = ('binSum', \
                 'binMean', 'binSTD', 'binSTD_binMean',\
                'q_10', 'q_25', 'q_50', 'q_75', 'q_90', \
                'q_range', 'q_range_ratio', 'binMeanSq', 'binMean_q50', \
                 \
                'binMean_g', 'binSTD_g',\
                'q_10_g', 'q_25_g', 'q_50_g', 'q_75_g', 'q_90_g', \
                'q_range_g', 'binMeanSq_g', 'binMean_q50_g', \
                 \
                 'binMean_g1', 'binSTD_g1',\
                'q_10_g1', 'q_25_g1', 'q_50_g1', 'q_75_g1', 'q_90_g1', \
                'q_range_g1', 'binMeanSq_g1', 'binMean_q50_g1', \
                 \
                'multibin_0_10', 'multibin_10_256', 'multibin_0_20', 'multibin_20_256', \
                'multibin_10_ratio', 'multibin_20_ratio', \
                 \
                'multibin_0_10_g', 'multibin_10_256_g', 'multibin_0_20_g', 'multibin_20_256_g', \
                'multibin_10_ratio_g', 'multibin_20_ratio_g', \
                 \
                'multibin_0_10_g1', 'multibin_10_256_g1', 'multibin_0_20_g1', 'multibin_20_256_g1', \
                'multibin_10_ratio_g1', 'multibin_20_ratio_g1', \
                'transmission')
    # Initialize array
    for i in range(0,len(statsList)):
        stats[statsList[i]] = np.zeros((len(dat), dat[0].shape[1]))
    # useful matrix for vectorizing calculations    
    binArray = np.matlib.repmat(spectrumBins, dat[0].shape[1],1).T
    
    # CALCULATE THE STATISTICS
    for ii in np.arange(len(dat)):
        print ii
        datTemp = dat[ii].astype(float)
        
        # remove the lower bins by setting it to zero
        datTemp[0:lowerBinThreshold,:] = 0
    
        # QUANTILES
        datTempCumSum = datTemp.cumsum(axis = 0) # cumulative sum
        datTempCumSum = datTempCumSum / np.matlib.repmat(datTempCumSum[-1,:], 256, 1) # normalize the cumulative sum
        qIntervals = [10., 25., 50., 75., 90.]
        for j in range(len(qIntervals)):
            statName = 'q_%02d' % qIntervals[j]# apparently needs a temporary variable
            stats[statName][ii,:] = spectrumBins[np.argmin(abs(datTempCumSum - (qIntervals[j]/100)), axis = 0)]
        stats['q_range'][ii,:] = stats['q_75'][ii,:] - stats['q_25'][ii,:]
        stats['q_range_ratio'][ii,:] = stats['q_range'][ii,:] / stats['q_50'][ii,:]
    
        # calculate the mean and spread (count-weighted over bin centers)
        stats['binMean'][ii,:] = (binArray * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        stats['binMeanSq'][ii,:] = ((binArray**2) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)
        stats['binSum'][ii,:] = datTemp.sum(axis = 0).astype(float)
    
        stats['binMean_q50'][ii,:] = stats['binMean'][ii,:] - stats['q_50'][ii,:]
    
        temp = np.matlib.repmat(stats['binMean'][ii,:], datTemp.shape[0],1)  # array of means, 256 x number detectors
    
        stats['binSTD'][ii,:] = np.sqrt(  ( ((binArray - temp)**2) * datTemp).sum(axis = 0).astype(float) / datTemp.sum(axis = 0).astype(float)  )
        stats['binSTD_binMean'][ii,:] = stats['binSTD'][ii,:] / stats['binMean'][ii,:]
        
        # Correction across time
        g = gainCorrectionMat[baselineIndex,:] / gainCorrectionMat[ii,:]
        # Correct across detectors; line up gains to the baseline director, first CC datasets
        g1 = gainCorrectionMat[baselineIndex,baselineDetector] / gainCorrectionMat[baselineIndex,:]    
        
        stats['q_10_g'][ii,:] = stats['q_10'][ii,:] * g
        stats['q_25_g'][ii,:] = stats['q_25'][ii,:] * g
        stats['q_50_g'][ii,:] = stats['q_50'][ii,:] * g
        stats['q_75_g'][ii,:] = stats['q_75'][ii,:] * g
        stats['q_90_g'][ii,:] = stats['q_90'][ii,:] * g
        stats['q_range_g'][ii,:] = stats['q_range'][ii,:] * g
        stats['binMean_g'][ii,:] = stats['binMean'][ii,:] * g
        stats['binMeanSq_g'][ii,:] = stats['binMeanSq'][ii,:] * g
        stats['binMean_q50_g'][ii,:] = stats['binMean_q50'][ii,:] * g
        stats['binSTD_g'][ii,:] = stats['binSTD'][ii,:] * g
    
        # _g1 stats apply both the across-time (g) and across-detector (g1) corrections
        stats['q_10_g1'][ii,:] = stats['q_10'][ii,:] * g * g1
        stats['q_25_g1'][ii,:] = stats['q_25'][ii,:] * g * g1
        stats['q_50_g1'][ii,:] = stats['q_50'][ii,:] * g * g1
        stats['q_75_g1'][ii,:] = stats['q_75'][ii,:] * g * g1
        stats['q_90_g1'][ii,:] = stats['q_90'][ii,:] * g * g1
        stats['q_range_g1'][ii,:] = stats['q_range'][ii,:] * g * g1
        stats['binMean_g1'][ii,:] = stats['binMean'][ii,:] * g * g1
        stats['binMeanSq_g1'][ii,:] = stats['binMeanSq'][ii,:] * g * g1
        stats['binMean_q50_g1'][ii,:] = stats['binMean_q50'][ii,:] * g * g1
        stats['binSTD_g1'][ii,:] = stats['binSTD'][ii,:] * g * g1
    
        
        # MULTIBIN PARAMETERS
        # Calculate high-low ratios - no rebinning so no correction
        
        # find the bin that is just above 10
        highlowIndex10 = np.argmin( abs(spectrumBins - 10) )
        highlowIndex20 = np.argmin( abs(spectrumBins - 20) )
        
        stats['multibin_0_10'][ii,:] = datTemp[0:highlowIndex10,:].sum(axis=0)  # bins 0 to 9, so first ten bins
        stats['multibin_10_256'][ii,:] = datTemp[highlowIndex10:,:].sum(axis=0)
        stats['multibin_0_20'][ii,:] = datTemp[0:highlowIndex20,:].sum(axis=0)
        stats['multibin_20_256'][ii,:] = datTemp[highlowIndex20:,:].sum(axis=0)
    
        stats['multibin_10_ratio'][ii,:]  = stats['multibin_0_10'][ii,:]  /stats['multibin_10_256'][ii,:]
        stats['multibin_20_ratio'][ii,:]  = stats['multibin_0_20'][ii,:]  /stats['multibin_20_256'][ii,:]
    
        # calculate the histogram split location with correction for gain shift in time
        binCorrection = gainCorrectionMat[ii,:]/gainCorrectionMat[baselineIndex,:]
        
        # bin 10: per-detector split threshold
        thresholdMat = np.round(np.matlib.repmat(binCorrection, 256,1)*9.0)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
        
        stats['multibin_0_10_g'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_10_256_g'][ii,:] = (datTemp * cutt2).sum(axis=0)
    
        # bin 20
        thresholdMat = np.round(np.matlib.repmat(binCorrection, 256,1)*19.0)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
        stats['multibin_0_20_g'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_20_256_g'][ii,:] = (datTemp * cutt2).sum(axis=0)
        stats['multibin_10_ratio_g'][ii,:] = stats['multibin_0_10_g'][ii,:] / stats['multibin_10_256_g'][ii,:]
        stats['multibin_20_ratio_g'][ii,:] = stats['multibin_0_20_g'][ii,:] / stats['multibin_20_256_g'][ii,:]
    
        # CORRECT ACROSS DETECTORS using the first CC dataset to correct
        binCorrection = gainCorrectionMat[ii,:]/gainCorrectionMat[baselineIndex,:] # current/baseline
        temp = gainCorrectionMat[baselineIndex,:] / gainCorrectionMat[baselineIndex,baselineDetector]
        binCorrection = binCorrection * temp
        
        # bin 10
        thresholdMat = np.round(np.matlib.repmat(binCorrection, len(spectrumBins),1)*9.0)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
        
        stats['multibin_0_10_g1'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_10_256_g1'][ii,:] = (datTemp * cutt2).sum(axis=0)
    
        # bin 20
        thresholdMat = np.round(np.matlib.repmat(binCorrection, len(spectrumBins),1)*19.0)  # threshold
        cutt1 = binArray <= thresholdMat
        cutt1 = cutt1.astype(float)
        cutt2 = binArray > thresholdMat
        cutt2 = cutt2.astype(float)
    
        stats['multibin_0_20_g1'][ii,:] = (datTemp * cutt1).sum(axis=0)
        stats['multibin_20_256_g1'][ii,:] = (datTemp * cutt2).sum(axis=0)
    
        stats['multibin_10_ratio_g1'][ii,:] = stats['multibin_0_10_g1'][ii,:] / stats['multibin_10_256_g1'][ii,:]
        stats['multibin_20_ratio_g1'][ii,:] = stats['multibin_0_20_g1'][ii,:] / stats['multibin_20_256_g1'][ii,:]
        
        # transmission value - sum spectra / acquisition time
        stats['transmission'][ii,:] = datTemp.sum(axis = 0) / acqTimeMC[ii]
    return stats

#def LoadStats(basepath, filenameList):
    ## load stats files
    ## make into stats dictionary
    
def WriteStats(stats, filenameList, outputDir, outputFilenamePrefix):
    """Write each statistic in *stats* to its own CSV file.

    One file per stat is created as <outputDir>/<prefix>_<statName>.csv;
    each row is the dataset filename followed by that dataset's per-detector
    values (truncated to 6-character strings, as before).

    stats                -- dict: stat name -> (datasets x detectors) array
    filenameList         -- one label per dataset row
    outputDir            -- destination directory (must exist)
    outputFilenamePrefix -- filename prefix, without extension
    """
    for statName in stats:
        # os.path.join instead of a hard-coded '\\' so paths work off Windows too
        outPath = os.path.join(outputDir, outputFilenamePrefix + '_' + statName + '.csv')
        # 'with' guarantees the handle is closed even if a row write raises
        with open(outPath, 'wb') as fid:
            csvWriterObj = csv.writer(fid, delimiter = ',')
            for jj in range(stats[statName].shape[0]):
                csvWriterObj.writerow(np.hstack( ( filenameList[jj], stats[statName][jj,:].astype('|S6'))))

def CreateStatsCollapsed(stats, groupNamesList, datasetGroupsIndices, detectorList):
    """Consolidate statistics for all datasets in a particular group.

    For each group, every statistic in ``stats`` is gathered over the group's
    dataset rows and the selected detector columns, then flattened into one
    vector of length n_datasets * n_detectors.  A parallel 'detectorNumber'
    vector records which detector each flattened entry came from.

    stats                -- dict: stat name -> (datasets x detectors) array
    groupNamesList       -- names of the dataset groups to process
    datasetGroupsIndices -- dict: group name -> list of dataset row indices
    detectorList         -- detector column indices to keep

    Returns dict: group -> {stat name -> 1-d array, 'detectorNumber' -> 1-d array}.
    """
    numberDetectors = len(detectorList)
    detectorColumns = np.asarray(detectorList)
    statsCollapsed = dict()

    for group in groupNamesList:
        rowIndices = datasetGroupsIndices[group]
        totalLength = len(rowIndices) * numberDetectors
        groupOut = dict()

        # The detector number of each flattened entry is the same for every
        # stat, so build it once per group (previously rebuilt per stat).
        detectorNumber = np.zeros(totalLength)
        for jj in range(len(rowIndices)):
            startIndex = jj * numberDetectors
            detectorNumber[startIndex:startIndex + numberDetectors] = detectorColumns
        groupOut['detectorNumber'] = detectorNumber

        for stat in stats:
            values = np.zeros(totalLength)
            for jj, rowIndex in enumerate(rowIndices):
                startIndex = jj * numberDetectors
                # reshape the selected values into the 1-d slot for this dataset
                values[startIndex:startIndex + numberDetectors] = \
                    stats[stat][rowIndex, detectorColumns].reshape(-1)
            groupOut[stat] = values

        statsCollapsed[group] = groupOut
    return statsCollapsed

def CreateStatsCollapsedAll(statsCollapsed, statsCollapseList, groupNamesExportList):
    """Assemble collapsed statistics into feature matrices for classification.

    For each exported group, the requested stats are stacked as columns of a
    (n_points x n_stats) matrix.  All group matrices are then stacked row-wise
    with one extra right-most column holding the group index (the class label).

    statsCollapsed       -- dict from CreateStatsCollapsed: group -> {stat -> 1-d array}
    statsCollapseList    -- stat names to use as feature columns, in column order
    groupNamesExportList -- groups to export; position defines the class label

    Returns (statsCollapsedAll, statsCollapsedAllAll):
      statsCollapsedAll    -- dict: group -> (n_points x n_stats) matrix
      statsCollapsedAllAll -- stacked matrix with the label as last column
    """
    statsCollapsedAll = dict()
    labeledBlocks = []

    for groupIdx, group in enumerate(groupNamesExportList):
        groupStats = statsCollapsed[group]
        # number of data points (differs between dataset groups); any stat
        # in the group has the full length, so peek at the first available one
        anyStat = next(iter(groupStats))
        numberDataPoints = len(groupStats[anyStat])

        featureMat = np.zeros((numberDataPoints, len(statsCollapseList)))
        for col, stat in enumerate(statsCollapseList):
            featureMat[:, col] = groupStats[stat]
        statsCollapsedAll[group] = featureMat

        # append the class label (group index) as the right-most column
        labelColumn = groupIdx + np.zeros((featureMat.shape[0], 1))
        labeledBlocks.append(np.hstack((featureMat, labelColumn)))

    # single vstack replaces the old first-iteration special case
    statsCollapsedAllAll = np.vstack(labeledBlocks)
    return (statsCollapsedAll, statsCollapsedAllAll)


def RunMlpyLDA(features, targets, featuresAllTransmission, targetsAllTransmission, numTrials):
    """Bootstrap-train mlpy LDA classifiers and collect classification stats.

    Each trial trains an mlpy.LDAC on a bootstrap resample (sampling with
    replacement, same size as the data) of (features, targets), then scores
    it on the full (features, targets) set and on the wider
    (featuresAllTransmission, targetsAllTransmission) set (the '0' arrays).

    Returns (w, results): w is a (numTrials x n_features) array of LDA
    weight vectors; results maps each count/rate name below to its
    per-trial array.
    """
    try:
        w = np.zeros((numTrials, features.shape[1]))
    except:
        # features may be 1-d (a single feature column)
        w = np.zeros((numTrials, 1))
        
    # Track success percentages
    success = np.zeros(numTrials)
    falsepos = np.zeros(numTrials)
    truepos = np.zeros(numTrials)
    falseneg = np.zeros(numTrials)
    trueneg = np.zeros(numTrials)

    successRate = np.zeros(numTrials)
    falseposRate = np.zeros(numTrials)
    trueposRate = np.zeros(numTrials)
    falsenegRate = np.zeros(numTrials)
    truenegRate = np.zeros(numTrials)
    specificity = np.zeros(numTrials)
    sensitivity = np.zeros(numTrials)

    # '0'-suffixed arrays score against the full transmission-range data
    success0 = np.zeros(numTrials)
    falsepos0 = np.zeros(numTrials)
    truepos0 = np.zeros(numTrials)
    falseneg0 = np.zeros(numTrials)
    trueneg0 = np.zeros(numTrials)

    successRate0 = np.zeros(numTrials)
    falseposRate0 = np.zeros(numTrials)
    trueposRate0 = np.zeros(numTrials)
    falsenegRate0 = np.zeros(numTrials)
    truenegRate0 = np.zeros(numTrials)
    specificity0 = np.zeros(numTrials)
    sensitivity0 = np.zeros(numTrials)

    for ii in range(numTrials):  # cycle through trials

        # Make random set of indices of length equal to number of data points
        # (sampling WITH replacement, i.e. a bootstrap resample)
        cut = np.random.choice(np.arange(len(targets)), len(targets))
        
        featuresSubset = features[cut,:]
        targetsSubset = targets[cut]

        # MLPY
        ldac = mlpy.LDAC()
        ldac.learn(featuresSubset, targetsSubset)
        y_pred = ldac.pred(features)

        # Convert to boolean so that it can be inverted       
        y_pred = y_pred.astype(bool)
        targets = targets.astype(bool)
        
        # Success of the rest of the data points in the transmission window
        success[ii] = sum(y_pred == targets)
        falsepos[ii] = sum(y_pred & ~targets)
        truepos[ii] = sum(y_pred & targets)
        falseneg[ii] = sum(~y_pred & targets)
        trueneg[ii] = sum(~y_pred & ~targets)

        successRate[ii] = success[ii]/float(len(targets))
        falseposRate[ii] = falsepos[ii]/float(len(targets))
        trueposRate[ii] = truepos[ii]/float(len(targets))
        falsenegRate[ii] = falseneg[ii]/float(len(targets))
        truenegRate[ii] = trueneg[ii]/float(len(targets))

        specificity[ii] = trueneg[ii]/float(sum(~targets)) # true negatives / num negatives
        sensitivity[ii] = truepos[ii]/float(sum(targets)) # num true pos / num true
        
        # Success on rest of the data
        y_predAllTransmission = ldac.pred(featuresAllTransmission)
        
        y_predAllTransmission = y_predAllTransmission.astype(bool)
        targetsAllTransmission = targetsAllTransmission.astype(bool)
        
        success0[ii] = sum(y_predAllTransmission == targetsAllTransmission)
        falsepos0[ii] = sum(y_predAllTransmission & ~targetsAllTransmission)
        truepos0[ii] = sum(y_predAllTransmission & targetsAllTransmission)
        falseneg0[ii] = sum(~y_predAllTransmission & targetsAllTransmission)
        trueneg0[ii] = sum(~y_predAllTransmission & ~targetsAllTransmission)
        
        successRate0[ii] = success0[ii]/float(len(targetsAllTransmission))
        falseposRate0[ii] = falsepos0[ii]/float(len(targetsAllTransmission))
        trueposRate0[ii] = truepos0[ii]/float(len(targetsAllTransmission))
        falsenegRate0[ii] = falseneg0[ii]/float(len(targetsAllTransmission))
        truenegRate0[ii] = trueneg0[ii]/float(len(targetsAllTransmission))     

        specificity0[ii] = trueneg0[ii]/float(sum(~targetsAllTransmission)) # true negatives / num negatives
        sensitivity0[ii] = truepos0[ii]/float(sum(targetsAllTransmission)) # num true pos / num true
        
        w[ii,:] = ldac.w()
    
    # gather the per-trial arrays listed above into the results dict by name
    results = {}
    copyList = [\
        'success', 'falsepos', 'truepos', 'falseneg', 'trueneg', \
        'successRate', 'falseposRate', 'trueposRate', 'falsenegRate', 'truenegRate', \
        'specificity', 'sensitivity', \
        'success0', 'falsepos0', 'truepos0', 'falseneg0', 'trueneg0', \
        'successRate0', 'falseposRate0', 'trueposRate0', 'falsenegRate0', 'truenegRate0', \
        'specificity0', 'sensitivity0', \
        ]
    for c in copyList:
        # eval() here only looks up the local variables named above
        results[c] = eval(c)
    return(w, results)

def RunMlpyLDAOLD(statsMatrix, trainFraction, numTrials, transmissionRange, includeTransmission):
    """Older mlpy-LDA runner working directly on a stacked stats matrix.

    statsMatrix layout (by convention of CreateStatsCollapsedAll): feature
    columns, then transmission in the second-to-last column, then the class
    label in the last column.  Each trial trains on a random subset of
    trainFraction of the transmission-cut data and scores on the full cut
    data and on all the data.

    Returns (w, success, success0): LDA weights per trial, success rate
    inside the transmission cut, and success rate on all data.
    """
    # Define cut in transmission
    transmission = statsMatrix[:,-2]
    cutt = (transmission > transmissionRange[0]) & (transmission < transmissionRange[1])
    
    # To include or not include transmission matrix
    if includeTransmission:
        featuresAllTransmission = statsMatrix[:,0:-1] 
    else:
        featuresAllTransmission = statsMatrix[:,0:-2]
    targetsAllTransmission = statsMatrix[:,-1]
    
    # Cut in transmission
    features = statsMatrix[cutt,0:-1] # values, INCLUDE transmission
    targets = statsMatrix[cutt,-1] # classification
    
    w = np.zeros((numTrials, features.shape[1]))
    
    # Track success percentages
    success = np.zeros(numTrials)
    success0 = np.zeros(numTrials)
    #success_sklearn = np.zeros(numTrials)
    
    for ii in range(numTrials):
        # learn on random subset; include random sample of XX%
    
        cut = np.random.randint(0,len(targets), round(len(targets) * trainFraction))
        featuresSubset = features[cut,:]
        targetsSubset = targets[cut]

        ## sklearn
        #lda = LDA()
        #lda.fit(featuresSubset, targetsSubset, store_covariance=True)
        #y_pred = lda.predict(features)
        #success_sklearn[ii] = sum(y_pred == targets)/float(len(targets))

        # MLPY
        ldac = mlpy.LDAC()
        ldac.learn(featuresSubset, targetsSubset)
        y_pred = ldac.pred(features)
        
        # Success of the rest of the data points in the transmission window
        success[ii] = sum(y_pred == targets)/float(len(targets))

        # Success on rest of the data
        success0[ii] = sum(ldac.pred(featuresAllTransmission) == targetsAllTransmission)/float(len(targetsAllTransmission))
        
        w[ii,:] = ldac.w()
    
#    wMean = w.mean(axis =0)    
    # returns success
    return(w, success, success0)

def RunLDA(statsMatrix, trainFraction, numTrials, transmissionRange, includeTransmission):
    """sklearn counterpart of RunMlpyLDAOLD: LDA trials on a stats matrix.

    Same matrix layout as RunMlpyLDAOLD (features ..., transmission in the
    second-to-last column, class label last).  Uses sklearn.lda.LDA
    (NOTE(review): sklearn.lda is long deprecated; modern sklearn exposes
    this as sklearn.discriminant_analysis.LinearDiscriminantAnalysis).

    Returns (w, success, success0): per-trial coefficient vectors, success
    rate inside the transmission cut, success rate on all data.
    """
    # Define cut in transmission
    transmission = statsMatrix[:,-2]
    cutt = (transmission > transmissionRange[0]) & (transmission < transmissionRange[1])
    
    # To include or not include transmission matrix
    if includeTransmission:
        featuresAllTransmission = statsMatrix[:,0:-1] 
    else:
        featuresAllTransmission = statsMatrix[:,0:-2]
    targetsAllTransmission = statsMatrix[:,-1]
    
    # Cut in transmission
    features = statsMatrix[cutt,0:-1] # values, INCLUDE transmission
    targets = statsMatrix[cutt,-1] # classification
    
    w = np.zeros((numTrials, features.shape[1]))
    
    # Track success percentages
    success = np.zeros(numTrials)
    success0 = np.zeros(numTrials)
    #success_sklearn = np.zeros(numTrials)
    
    for ii in range(numTrials):
        # learn on random subset; include random sample of XX%
        cut = np.random.randint(0,len(targets), round(len(targets) * trainFraction))
        featuresSubset = features[cut,:]
        targetsSubset = targets[cut]

        ## sklearn
        ldac = LDA()
        ldac.fit(featuresSubset, targetsSubset, store_covariance=True)
        y_pred = ldac.predict(features)
#        success_sklearn[ii] = sum(y_pred == targets)/float(len(targets))

        # MLPY
#        ldac = mlpy.LDAC()
#        ldac.learn(featuresSubset, targetsSubset)
#        y_pred = ldac.pred(features)
        
        # Success of the rest of the data points in the transmission window
        success[ii] = sum(y_pred == targets)/float(len(targets))

        # Success on rest of the data
        success0[ii] = sum(ldac.predict(featuresAllTransmission) == targetsAllTransmission)/float(len(targetsAllTransmission))
        print ldac.coef_
        w[ii,:] = ldac.coef_.T
    
    # NOTE(review): wMean is computed but never used or returned
    wMean = w.mean(axis =0)    
    # returns success
    return(w, success, success0)


def ReBinData():
    """Placeholder for spectrum re-binning; not implemented yet (returns None)."""
    pass
##SIMULATION
def SimulateSpectrum(spectrum, binsInput, binsOutput, numberTrials, t0, t):
    """Monte-Carlo simulate short-acquisition spectra from one high-stats spectrum.

    Parameters:
        spectrum     : counts per input bin (1-D array).
        binsInput    : bin axis matching spectrum.
        binsOutput   : bin axis to sample on (NOTE(review): the histogram below
                       uses fixed edges arange(257), i.e. it assumes 256 output
                       bins -- confirm len(binsOutput) == 256 for callers).
        numberTrials : number of simulated spectra to generate.
        t0, t        : original and simulated acquisition times (same pulse rate).

    Returns datMC: (len(binsOutput), numberTrials) array of simulated counts.
    """
    # container for the MC spectra
    datMC = np.zeros((len(binsOutput), numberTrials))

    # total counts in the source spectrum
    totalCounts = spectrum.sum().astype(float)

    # mean number of counts expected in the simulated data (same pulse rate)
    numberCountsMC = totalCounts * t / t0

    # build the sampling pdf on the output axis
    temp = np.interp(binsOutput, binsInput, spectrum.astype(float))
    probdist = temp / temp.sum()

    binEdges = np.arange(257)
    for trialNo in range(numberTrials):
        # Gaussian-randomized count total.
        # BUGFIX: int() added (Python 2 round() returns float and
        # np.random.choice rejects a float size); clamped at 0 because a
        # large negative Gaussian draw could otherwise produce a negative size.
        countsTemp = int(round(numberCountsMC + np.random.randn() * np.sqrt(numberCountsMC)))
        countsTemp = max(countsTemp, 0)
        # draw x-ray bin values from the pdf
        bins = np.random.choice(binsOutput, size=countsTemp, replace=True, p=probdist)
        # histogram the draws
        datMC[:, trialNo], binEdges = np.histogram(bins, bins=binEdges)
    return datMC


def SimulateDataset(spectra, binEdgesInput, binEdgesOutput, t0, t):
    """Simulate every detector spectrum of one dataset exactly once.

    Parameters:
        spectra        : (nBinsIn, nDetectors) counts array.
        binEdgesInput  : input bin edges (len nBinsIn + 1).
        binEdgesOutput : output bin edges the simulated data is histogrammed on.
        t0, t          : original and simulated acquisition times (same pulse rate).

    Returns datMC: (nBinsOut, nDetectors) simulated counts.
    """
    # bin centers for input and output axes
    binCentersInput = np.array((binEdgesInput[1:] + binEdgesInput[:-1]) / 2.0)
    binCentersOutput = np.array((binEdgesOutput[1:] + binEdgesOutput[:-1]) / 2.0)
    # finer axis used for the sampling pdf
    binEdges0 = np.linspace(0, binEdgesInput[-1], 1000)
    binCenters0 = np.array((binEdges0[1:] + binEdges0[:-1]) / 2.0)

    # container for the MC spectra
    datMC = np.zeros((len(binCentersOutput), spectra.shape[1]))

    # total counts in each detector spectrum
    totalCounts = spectra.sum(axis=0).astype(float)
    print(spectra.shape[0])
    print(spectra.shape[1])

    # some detectors may be bad and have no counts
    skip = totalCounts == 0

    # mean number of counts expected in simulated data (same pulse rate)
    numberCountsMC = totalCounts * t / t0

    # Create a pdf for each detector on the finer axis.
    # A detector with no counts gets all probability in the first bin.
    pdf = np.zeros((len(binCenters0), spectra.shape[1]))
    for ii in np.arange(spectra.shape[1]):
        if skip[ii]:
            pdf[0, ii] = 1.0
        else:
            pdf[:, ii] = np.interp(binCenters0, binCentersInput, spectra[:, ii].astype(float))
        pdf[:, ii] = pdf[:, ii] / pdf[:, ii].sum()

    for ii in np.arange(spectra.shape[1]):
        if skip[ii]:
            datMC[:, ii] = 0
        else:
            # Gaussian-randomized event total.
            # BUGFIX: int() added (Python 2 round() returns float and
            # np.random.choice rejects a float size).
            countsTemp = int(round(numberCountsMC[ii] + np.random.randn() * np.sqrt(numberCountsMC[ii])))
            # simulate only if there are counts
            if countsTemp > 1:
                # draw x-ray energies from the finer pdf
                bins = np.random.choice(binCenters0, size=countsTemp, replace=True, p=pdf[:, ii])
                # bin the draws on the user's output edges
                datMC[:, ii], binEdges = np.histogram(bins, bins=binEdgesOutput)
            else:
                datMC[:, ii] = 0
    return datMC

def SimulateAll(basepath, filenameList, binList, timeList, numTrials, outputDir):
    """Simulate every (dataset, acquisition time, binning, trial) combination.

    For each high-statistics dataset in filenameList, generates numTrials
    Monte-Carlo datasets at each time in timeList and each binning in binList,
    writing each result to outputDir via SaveSpectra.
    """
    # Load the dataset summary for the high-statistics data
    # (os.path.join replaces hard-coded '\\' for portability; identical on Windows)
    infoFilename = os.path.join(basepath, 'datasetSummaryOLD.txt')
    (datasetDescription, datasetAcquisitionTime,
     datasetTime, datasetTimeNum, datasetTimeStr) = \
        GetDatasetInformation(infoFilename, filenameList)

    fullfilenameList = np.array(
        [os.path.join(basepath, name + '.csv') for name in filenameList])

    dat = ReadZspec(fullfilenameList)
    # cycle through the high-stats datasets
    for ii in range(len(filenameList)):
        filename = filenameList[ii]
        # cycle through the acquisition times to simulate
        for t in timeList:
            # cycle through the different binnings
            for bins in binList:
                # cycle through trials
                for trial in np.arange(numTrials):
                    print(filename + ', t = ' + str(t) + ', bin' + str(bins[0]) + '_' + str(bins[-1]) + str(bins[1] - bins[0]) + ', trial = ' + str(trial))
                    # Acquisition time of the high-stat data is doubled because
                    # it had 2x pulsing rate.
                    # NOTE(review): datasetAcquisitionTime originates from a CSV
                    # reader -- if it is still a string, *2 concatenates instead
                    # of doubling; confirm it is numeric by this point.
                    datMC = SimulateDataset(
                        dat[ii], np.arange(dat[ii].shape[0] + 1), bins,
                        datasetAcquisitionTime[ii] * 2, t)
                    fullfilenameOutput = os.path.join(
                        outputDir,
                        filename +
                        '_b' + str(bins[0]) + '_' + str(bins[-1]) + '_' + str(bins[1] - bins[0]) +
                        '_t' + str(t) + '_T' + str(trial) + '.csv')
                    SaveSpectra(datMC, fullfilenameOutput)

def CreateSimulationDataSummaryFile(filenamelistMC, acqTimeList, datasetDescription, datasetAcquisitionTime, \
 datasetTime, datasetTimeNum, datasetTimeStr):
    """Placeholder for writing a summary file of simulated data; not implemented (returns None)."""
    pass
def GenerateDefaultDetectorList():
    """
        Build the bad/good detector bookkeeping for the 1st zspec detector set.

        Returns (goodDetectorsList, badDetectorsList, goodDetectorsMask,
        badDetectorsMask); the lists are 0-based detector indices, the masks
        are boolean arrays of length 136.
        4/8/2014, JK updated code, remove detector 137 which doesn't exist
    """
    # bad detectors, 1-based numbering as recorded in the lab notes
    badOneBased = np.array([1, 2, 3, 4, 5, 6, 7, 8, 15, 20, 26, 31, 33, 39, 40, 44, 53, 56, 62, 68,
                            76, 80, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136])

    badDetectorsMask = np.zeros(136, dtype=bool)
    badDetectorsMask[badOneBased - 1] = True  # shift to 0-based
    goodDetectorsMask = ~badDetectorsMask

    # 0-based index lists derived from the masks
    goodDetectorsList = np.where(goodDetectorsMask)[0]
    badDetectorsList = np.where(badDetectorsMask)[0]
    return (goodDetectorsList, badDetectorsList, goodDetectorsMask, badDetectorsMask)

def GenerateDefaultDetectorList2ndSet():
    """
        Build the bad/good detector bookkeeping for the 2nd zspec detector set.

        Returns (goodDetectorsList, badDetectorsList, goodDetectorsMask,
        badDetectorsMask) with 0-based index lists and length-136 boolean masks.
        4/8/2014, JK, before goodDetectorsList had first detector as 0, while badDetectorsList started with 1;
            Fixed this so that starting
    """
    # bad detectors, 1-based (the repeated 123 is harmless: the mask absorbs it)
    badOneBased = np.array([1, 2, 3, 4, 5, 6, 7, 8, 19, 20, 26, 62, 68, 76, 80, 121, 122, 123, 123, 124,
                            125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136])

    badDetectorsMask = np.zeros(136, dtype=bool)
    badDetectorsMask[badOneBased - 1] = True  # shift to 0-based
    goodDetectorsMask = ~badDetectorsMask

    # 0-based index lists derived from the masks
    goodDetectorsList = np.where(goodDetectorsMask)[0]
    badDetectorsList = np.where(badDetectorsMask)[0]
    return (goodDetectorsList, badDetectorsList, goodDetectorsMask, badDetectorsMask)

def GenerateDefaultDatasetFilenameList(basepath):
    """
        Filename prefixes and full CSV paths for the first set of zspec data
        (dx95-dx99 followed by dy00-dy64).  Returns (filenameList,
        fullfilenameList) as numpy string arrays.
    """
    prefixes = ['dx' + '%02d' % n for n in range(95, 100)]
    prefixes.extend('dy' + '%02d' % n for n in range(0, 65))
    filenameList = np.array(prefixes)

    # full paths use the original Windows-style separator
    fullfilenameList = np.array([basepath + '\\' + p + '.csv' for p in prefixes])
    return (filenameList, fullfilenameList)

def GenerateDefaultDatasetFilenameList2ndSet(basepath):
    """
        For second set of zspec data taken in March 2014 (ec76-ec99, ed00-ed84,
        minus ed13/ed17 which are removed).
        Returns the filename prefixes and the full filenames of the csv files
        as numpy string arrays.
        4/2/2014, JK
    """
    filenameList = []
    # CONSISTENCY: range() instead of xrange() -- the rest of the file uses
    # range() for these loops and range() behaves identically here.
    prefix = 'ec'
    for i in range(76, 100):
        filenameList.append('%s%02d' % (prefix, i))

    prefix = 'ed'
    for i in range(0, 85):
        filenameList.append('%s%02d' % (prefix, i))

    # datasets to drop from the list
    prefixRemoveList = ['ed13', 'ed17']
    for p in prefixRemoveList:
        filenameList.remove(p)
    filenameList = np.array(filenameList)

    fullfilenameList = np.array(
        [os.path.join(basepath, '%s.csv' % name) for name in filenameList])
    return (filenameList, fullfilenameList)

def GenerateDefaultDatasetGroupList(filenameList):
    """
        Returns the datasetGroups dict which contains the list of filename prefixes
        for each dataset group ("Fe", etc).  Also returns datasetGroupsIndices which
        is a dict of all the dataset indices referring to elements in filenameList.
        For 1st group of zspec datasets.

        filenameList must be a numpy string array containing every prefix used
        below (np.where(...)[0][0] raises IndexError on a missing prefix).
    """
    datasetGroups = dict()
    datasetGroups['CC'] = ('dx95', 'dy00', 'dy01', 'dy05', \
                           'dy10', 'dy15', 'dy16', 'dy22', 'dy35', 'dy36')
    datasetGroups['Pb'] = ('dx96', 'dx97', 'dx98', 'dx99')
    datasetGroups['Fe'] = ('dy02', 'dy03', 'dy04')
    datasetGroups['Al'] = ('dy06', 'dy07', 'dy08', 'dy09')

    datasetGroups['Pb3Fe'] = ('dy11', 'dy12', 'dy13', 'dy14')
    datasetGroups['Pb3Al'] = ('dy17', 'dy18', 'dy19', 'dy20', 'dy21')
    datasetGroups['Pb4Fe'] = ('dy23', 'dy24', 'dy25', 'dy26', 'dy27', 'dy28')
    datasetGroups['Pb4Al'] = ('dy37', 'dy38', 'dy39', 'dy40', 'dy41', 'dy42', 'dy43')

    # does not include the low statistics datasets
    datasetGroups['PbALL'] = datasetGroups['Pb'] + \
                             datasetGroups['Pb3Fe'] + \
                             datasetGroups['Pb3Al'] + \
                             datasetGroups['Pb4Fe'] + \
                             datasetGroups['Pb4Al']
    # does not include the low statistics datasets
    datasetGroups['PbNOT'] = datasetGroups['Fe'] + \
                             datasetGroups['Al']
    # low stats data sets
    datasetGroups['PbLS'] = ('dy45', 'dy46', 'dy47', 'dy48', 'dy49', \
                           'dy50', 'dy51', 'dy52', 'dy53', 'dy54')
    datasetGroups['FeLS'] = ('dy55', 'dy56', 'dy57', 'dy58', 'dy59')
    datasetGroups['AlLS'] = ('dy60', 'dy61', 'dy62', 'dy63', 'dy64')

    datasetGroups['Pb5_8'] = ('dx96', 'dx99')
    datasetGroups['Fe12'] = 'dy04'
    datasetGroups['Al30'] = 'dy09'

    datasetGroups['PbALLALL'] = datasetGroups['PbLS'] + datasetGroups['PbALL']
    datasetGroups['PbNOTNOT'] = datasetGroups['FeLS'] + \
                             datasetGroups['AlLS'] + datasetGroups['PbNOT']

    # Map each group to the indices of its members in filenameList.
    # BUGFIX: the original indexed dict.keys() (groupNames[ii]), which only
    # works on the Python 2 list view; iterating items() is equivalent and
    # works everywhere.
    datasetGroupsIndices = dict()
    for groupName, members in datasetGroups.items():
        if isinstance(members, str):
            # single-prefix group: keep the raw np.where result (1-element array)
            datasetGroupsIndices[groupName] = np.where(filenameList == members)[0]
        else:
            datasetGroupsIndices[groupName] = np.array(
                [np.where(filenameList == m)[0][0] for m in members])

    return (datasetGroups, datasetGroupsIndices)
    

def GenerateDefaultDatasetGroupList2ndSet(filenameList):
    """
        Build the dataset grouping tables for the 2nd set of zspec data.

        Parameters:
            filenameList : sequence of dataset filename prefixes (e.g. 'ec79');
                converted to a plain list internally so list.index() can be used.
                Every prefix referenced in the groups below must be present.

        Returns a 5-tuple:
            datasetGroups        : dict group name -> list of filename prefixes
            datasetGroupsIndices : dict group name -> np.array of indices into filenameList
            datasetGroupsWidth   : dict group name -> (nDatasets, 2) array; column 0 is the
                                   primary material width (inches), column 1 the second
                                   material width for mixtures (0 otherwise)
            datasetRawWidth      : per-dataset width rows for files that fall in a base
                                   material group (NOTE(review): ungrouped files get a
                                   datasetMaterial entry of '' but NO datasetRawWidth row,
                                   so the two arrays can differ in length -- confirm callers
                                   only pair them for grouped files)
            datasetMaterial      : per-file material group name ('' if ungrouped)
        For 2nd group of zspec datasets.
        4/2/2014, JK
    """

    filenameList = list(filenameList)
    # dataset groups
    datasetGroups = {}
    datasetGroupsWidth = {}
    
    #datasetGroups['CC'] = ('')
    # 5, 4, 6, 7, 8
    datasetGroups['Pb'] = ['ec79', 'ed69', 'ec78', 'ed70', 'ec80', 'ed71',  'ec81', 'ed72', 'ec82', 'ed73']
    datasetGroupsWidth['Pb'] = np.zeros( (len(datasetGroups['Pb']), 2) )
    datasetGroupsWidth['Pb'][:,0] = [4,   4.5,  5.0, 5.5,  6.0,     6.5,    7.0,   7.5,    8.0,  8.5]
    
    # '_0' / '_1' suffixes split each material into its first and second
    # measurement subsets (interleaved in the combined list above)
    datasetGroups['Pb_0'] = [        'ec79', 'ec78', 'ec80', 'ec81', 'ec82']
    datasetGroupsWidth['Pb_0'] = np.zeros( (len(datasetGroups['Pb_0']), 2) )
    datasetGroupsWidth['Pb_0'][:,0] = [4.,     5.,    6.,     7.,     8.]
    
    datasetGroups['Pb_1'] = [       'ed69', 'ed70', 'ed71', 'ed72',  'ed73']
    datasetGroupsWidth['Pb_1'] = np.zeros( (len(datasetGroups['Pb_1']), 2) )
    datasetGroupsWidth['Pb_1'][:,0] = [4.5,   5.5,    6.5,     7.5,      8.5]
    
    
    # 8, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17
    datasetGroups['Fe'] = ['ec87', 'ec88', 'ed61', 'ec89', 'ed62', 'ec90', 'ed63', 'ec91', 'ed64', 'ec92', 'ed65']
    datasetGroupsWidth['Fe'] = np.zeros( (len(datasetGroups['Fe']), 2) )
    datasetGroupsWidth['Fe'][:,0] = [8,  8,     9,       10,     11,     12,     13,     14,     15,     16,     17]
    
    # 8, 8, 10, 12, 14, 16
    datasetGroups['Fe_0'] = ['ec87', 'ec88', 'ec89', 'ec90', 'ec91', 'ec92']
    datasetGroupsWidth['Fe_0'] = np.zeros( (len(datasetGroups['Fe_0']), 2) )
    datasetGroupsWidth['Fe_0'][:,0] = [8,   8,     10,     12,     14,    16]
    
    # 9, 11, 13, 15, 17
    datasetGroups['Fe_1'] = ['ed61', 'ed62', 'ed63', 'ed64', 'ed65']
    datasetGroupsWidth['Fe_1'] = np.zeros( (len(datasetGroups['Fe_1']), 2) )
    datasetGroupsWidth['Fe_1'][:,0] = [9,  11,     13,     15,     17]
    
    
    # first set: 16, 20, 24, 28  second set: 24, 26, 28, 30, 32, 34, 38
    #datasetGroups['Al'] = ['ec94', 'ec95', 'ec96', 'ec97', 'ed52', 'ed53', 'ed54', 'ed55', 'ed56', 'ed57', 'ed58']
    datasetGroups['Al'] = ['ec94', 'ec95', 'ec96', 'ed52', 'ed53', 'ec97', 'ed54', 'ed55', 'ed56', 'ed57', 'ed58']
    datasetGroupsWidth['Al'] = np.zeros( (len(datasetGroups['Al']), 2) )
    datasetGroupsWidth['Al'][:,0] = [16,  20,    24,     24,     26,    28,      28,     30,      32,    34,    38]
    
    
    datasetGroups['Al_0'] = ['ec94', 'ec95', 'ec96', 'ec97']
    datasetGroupsWidth['Al_0'] = np.zeros( (len(datasetGroups['Al_0']), 2) )
    datasetGroupsWidth['Al_0'][:,0] = [16,  20,    24,     28]
    
    datasetGroups['Al_1'] = ['ed52', 'ed53', 'ed54', 'ed55', 'ed56', 'ed57', 'ed58']
    datasetGroupsWidth['Al_1'] = np.zeros( (len(datasetGroups['Al_1']), 2) )
    datasetGroupsWidth['Al_1'][:,0] = [24, 26,     28,     30,      32,    34,    38]
    
    # calibration and in between datasets
    datasetGroups['CC'] = ['ec76', 'ec77', 'ec83', 'ec84', 'ec85', \
        'ec86', 'ec93', 'ec98', 'ec99', 'ed00', 'ed01', 'ed02', 'ed08', 'ed09', \
        'ed10', 'ed11', 'ed12', 'ed14', 'ed15', 'ed21', 'ed27', 'ed28', 'ed35',\
        'ed36', 'ed37', 'ed43', 'ed44', 'ed49', 'ed50', 'ed51', 'ed59', 'ed60', 'ed66', 'ed67', 'ed68', 'ed74', 'ed84']
    datasetGroupsWidth['CC'] = np.zeros( (len(datasetGroups['CC']), 2) )
    datasetGroupsWidth['CC'][:,0] = 4
    
    # mixtures: column 0 = Pb width, column 1 = second material width
    # 3 inch Pb, Fe 4, 6, 8, 10 ,12
    datasetGroups['Pb3Fe'] = ['ed03', 'ed04', 'ed05', 'ed06', 'ed07']
    datasetGroupsWidth['Pb3Fe'] = np.zeros( (len(datasetGroups['Pb3Fe']), 2) )
    datasetGroupsWidth['Pb3Fe'][:,0] = 3
    datasetGroupsWidth['Pb3Fe'][:,1] = np.array([4, 6, 8, 10, 12])
    
    # 4 inch Pb, Fe 2, 2, 4, 5, 6, 7, 8, 9, 10
    datasetGroups['Pb4Fe'] = ['ed38', 'ed75', 'ed76', 'ed39', 'ed77', 'ed40', 'ed78', 'ed41', 'ed79', 'ed42']
    datasetGroupsWidth['Pb4Fe'] = np.zeros( (len(datasetGroups['Pb4Fe']), 2) )
    datasetGroupsWidth['Pb4Fe'][:,0] = 4
    datasetGroupsWidth['Pb4Fe'][:,1] = np.array([2, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    
    # 4 inch Pb, Fe 2, 4, 6, 8, 10
    datasetGroups['Pb4Fe_0'] = ['ed38', 'ed39', 'ed40', 'ed41', 'ed42']
    datasetGroupsWidth['Pb4Fe_0'] = np.zeros( (len(datasetGroups['Pb4Fe_0']), 2) )
    datasetGroupsWidth['Pb4Fe_0'][:,0] = 4
    datasetGroupsWidth['Pb4Fe_0'][:,1] = np.array([2, 4, 6, 8, 10])
    
    # 4 inch Pb, Fe 2, 3, 5, 7, 9
    datasetGroups['Pb4Fe_1'] = ['ed75', 'ed76', 'ed77', 'ed78', 'ed79']
    datasetGroupsWidth['Pb4Fe_1'] = np.zeros( (len(datasetGroups['Pb4Fe_1']), 2) )
    datasetGroupsWidth['Pb4Fe_1'][:,0] = 4
    datasetGroupsWidth['Pb4Fe_1'][:,1] = np.array([2, 3, 5, 7, 9])
    
    
    # 5 inch Pb, Fe 2, 4, 6, 8
    datasetGroups['Pb5Fe'] = ['ed45', 'ed46', 'ed47', 'ed48']
    datasetGroupsWidth['Pb5Fe'] = np.zeros( (len(datasetGroups['Pb5Fe']), 2) )
    datasetGroupsWidth['Pb5Fe'][:,0] = 5
    datasetGroupsWidth['Pb5Fe'][:,1] = np.array([2, 4, 6, 8])
    
    # 3 inch Pb, Al 12, 16, 20, 24
    datasetGroups['Pb3Al'] = ['ed20', 'ed16', 'ed18', 'ed19']
    datasetGroupsWidth['Pb3Al'] = np.zeros( (len(datasetGroups['Pb3Al']), 2) )
    datasetGroupsWidth['Pb3Al'][:,0] = 3
    datasetGroupsWidth['Pb3Al'][:,1] = np.array([12, 16, 20, 24])
    
    
    # 4 inch Pb, Al 8, 12, 16, 20, 24
    datasetGroups['Pb4Al'] = [    'ed22', 'ed80', 'ed81',   'ed23', 'ed82',  'ed24', 'ed83', 'ed25', 'ed26']
    datasetGroupsWidth['Pb4Al'] = np.zeros( (len(datasetGroups['Pb4Al']), 2) )
    datasetGroupsWidth['Pb4Al'][:,0] = 4
    datasetGroupsWidth['Pb4Al'][:,1] = np.array([8., 8., 10.,  12., 14., 16., 18., 20., 24.])
    
    # 4 inch Pb, Al 12, 16, 20, 24
    datasetGroups['Pb4Al_0'] = ['ed22', 'ed23', 'ed24', 'ed25', 'ed26']
    datasetGroupsWidth['Pb4Al_0'] = np.zeros( (len(datasetGroups['Pb4Al_0']), 2) )
    datasetGroupsWidth['Pb4Al_0'][:,0] = 4
    datasetGroupsWidth['Pb4Al_0'][:,1] = np.array([8, 12, 16, 20, 24])
    
    # 4 inch Pb, Al 12, 16, 20, 24
    datasetGroups['Pb4Al_1'] = ['ed80', 'ed81', 'ed82', 'ed83']
    datasetGroupsWidth['Pb4Al_1'] = np.zeros( (len(datasetGroups['Pb4Al_1']), 2) )
    datasetGroupsWidth['Pb4Al_1'][:,0] = 4
    datasetGroupsWidth['Pb4Al_1'][:,1] = np.array([8, 10, 14, 18])
    
    
    # 5 inch Pb, Al 4, 8, 12, 16, 20, 24
    datasetGroups['Pb5Al'] = ['ed29', 'ed30', 'ed31', 'ed32', 'ed33', 'ed34']
    datasetGroupsWidth['Pb5Al'] = np.zeros( (len(datasetGroups['Pb5Al']), 2) )
    datasetGroupsWidth['Pb5Al'][:,0] = 5
    datasetGroupsWidth['Pb5Al'][:,1] = np.array([4, 8, 12, 16, 20, 24])
    
    # Between-Runs ("IB" = in-between) calibration scans bracketing each run
    datasetGroups['PbIB'] = ['ec76', 'ec83']
    
    datasetGroups['FeIB'] = ['ec86', 'ec93', 'ed60', 'ed66'] # voltage was changed after ec86
    datasetGroups['FeIB_0'] = ['ec86', 'ec93'] # voltage was changed after ec86
    datasetGroups['FeIB_1'] = ['ed60', 'ed66'] # voltage was changed after ec86
    
    datasetGroups['AlIB'] = ['ec93', 'ec98', 'ed51', 'ed59']
    datasetGroups['AlIB_0'] = ['ec93', 'ec98'] # first set
    datasetGroups['AlIB_1'] = ['ed51', 'ed59'] # second set, more attenuation
    
    
    # mixed materials
    datasetGroups['Pb3FeIB'] = ['ed02', 'ed08']
    
    datasetGroups['Pb4FeIB'] = ['ed37', 'ed43', 'ed74', 'ed84']
    datasetGroups['Pb4FeIB_0'] = ['ed37', 'ed43']
    datasetGroups['Pb4FeIB_1'] = ['ed74', 'ed84']
    
    datasetGroups['Pb5FeIB'] = ['ed44', 'ed49']
    
    datasetGroups['Pb3AlIB'] = ['ed15', 'ed21']
    
    datasetGroups['Pb4AlIB'] = ['ed21', 'ed27', 'ed74', 'ed84']
    datasetGroups['Pb4AlIB_0'] = ['ed21', 'ed27']
    datasetGroups['Pb4AlIB_1'] = ['ed74', 'ed84']
    
    datasetGroups['Pb5AlIB'] = ['ed28', 'ed35']
    
    # convenience aggregate groups (list concatenation, duplicates possible)
    datasetGroups['pure'] = datasetGroups['Pb'] + datasetGroups['Fe'] + datasetGroups['Al']
    datasetGroups['PbFeMix'] =datasetGroups['Pb3Fe'] + datasetGroups['Pb4Fe'] + datasetGroups['Pb5Fe']
    datasetGroups['PbAlMix'] =datasetGroups['Pb3Al'] + datasetGroups['Pb4Al'] + datasetGroups['Pb5Al']
    datasetGroups['mix'] = datasetGroups['PbFeMix'] + datasetGroups['PbAlMix']
    
    
    #datasetGroups['PbFe'] = ('ed03', 'ed04', 'ed05', 'ed06', 'ed07')
    datasetGroups['IB'] = datasetGroups['PbIB'] + datasetGroups['FeIB'] + datasetGroups['AlIB'] + \
        datasetGroups['Pb3FeIB'] + datasetGroups['Pb4FeIB'] + datasetGroups['Pb5FeIB'] + \
        datasetGroups['Pb3AlIB'] + datasetGroups['Pb4AlIB'] + datasetGroups['Pb5AlIB']
    
    # get unique list 
    datasetGroups['IB'] = list(set(datasetGroups['IB']))
    datasetGroups['IB'].sort()    
    
    
    # does not include the low statistics datasets
    datasetGroups['PbALL'] = datasetGroups['Pb'] + \
                             datasetGroups['Pb3Fe'] + \
                             datasetGroups['Pb3Al'] + \
                             datasetGroups['Pb4Fe'] + \
                             datasetGroups['Pb4Al'] + \
                             datasetGroups['Pb5Fe'] + \
                             datasetGroups['Pb5Al']

    # does not include the low statistics datasets
    datasetGroups['PbNOT'] = datasetGroups['Fe'] + \
                             datasetGroups['Al']    

    datasetGroups['PbALLALL'] = copy.copy(datasetGroups['PbALL'])
    datasetGroups['PbNOTNOT'] = copy.copy(datasetGroups['PbNOT'])
    
    # the first and second subsets
    # only the Pb4Fe and Pb4Al were taken a second time
    datasetGroups['PbALL_0'] = datasetGroups['Pb_0'] + \
                             datasetGroups['Pb3Fe'] + \
                             datasetGroups['Pb3Al'] + \
                             datasetGroups['Pb4Fe_0'] + \
                             datasetGroups['Pb4Al_0'] + \
                             datasetGroups['Pb5Fe'] + \
                             datasetGroups['Pb5Al']

    datasetGroups['PbALL_1'] = datasetGroups['Pb_1'] + \
                             datasetGroups['Pb4Fe_1'] + \
                             datasetGroups['Pb4Al_1']
    # no lead
    datasetGroups['PbNOT_0'] = datasetGroups['Fe_0'] + \
                             datasetGroups['Al_0']    
    datasetGroups['PbNOT_1'] = datasetGroups['Fe_1'] + \
                             datasetGroups['Al_1']    


    # Get the indicies for each dataset in each group list
    datasetGroupsIndices = {}
    for k in datasetGroups.keys():
        temp = []
        for prefix in datasetGroups[k]:
            # list.index raises ValueError if a prefix is missing from filenameList
            temp.append(filenameList.index(prefix))
        datasetGroupsIndices[k] = np.array(temp)
        
    # all the possible dataset materials 
    tempList = ['Pb', 'Fe', 'Al', 'CC', 'Pb3Fe', 'Pb4Fe', 'Pb5Fe', 'Pb3Al', 'Pb4Al', 'Pb5Al']
    # create 1-to-1 arrays for material type and widths
    datasetMaterial = []
    datasetRawWidth = []
    
    # assign each file the first matching base material group and its width row
    for j,p in enumerate(filenameList):
        print(j)
        for i,t in enumerate(tempList):
            if p in datasetGroups[t]:
                datasetMaterial.append(t)
                index = datasetGroups[t].index(p)
                datasetRawWidth.append(datasetGroupsWidth[t][index])
                break
            if i == (len(tempList)-1): # if not assigned a group then ''
                datasetMaterial.append('')
                print(p)
    
    datasetRawWidth = np.array(datasetRawWidth)
    datasetMaterial = np.array(datasetMaterial)        
    
    return(datasetGroups, datasetGroupsIndices,datasetGroupsWidth, datasetRawWidth, datasetMaterial)
    
    
class DisplayZspecBasic:
    """Interactive viewer for a 3-D Zspec data cube dat[x, y, energyBin].

    Figure 99 shows a 2-D image of the cube summed over bins; clicking or
    dragging selects pixels whose (mean) spectrum is plotted in figure 100.
    Key presses switch the image view: 'c' total counts, 'm' mean bin,
    'v' bin standard deviation.
    """

    def __init__(self, dat, energy):
        # dat: 3-D array indexed [x, y, energyBin] -- assumed layout, TODO confirm
        # energy: per-bin energy axis used by the 'm'/'v' views
        self.dat = dat
        self.energy = energy
        self.fig1 = plt.figure(99)
        self.ax1 = self.fig1.add_subplot(111)

        self.ax1.imshow(self.dat.sum(2).T, interpolation='nearest', aspect='auto')

        self.fig1.canvas.show()

        # selection corners in data coordinates
        (self.x1, self.x2, self.y1, self.y2) = (0, 0, 0, 0)

        self.cid1 = self.fig1.canvas.mpl_connect('button_press_event', self.on_press)
        self.cid2 = self.fig1.canvas.mpl_connect('button_release_event', self.on_release)
        self.cid3 = self.fig1.canvas.mpl_connect('motion_notify_event', self.on_motion)
        self.cid4 = self.fig1.canvas.mpl_connect('key_press_event', self.on_key)

        self.height = self.dat.shape[1]
        self.width = self.dat.shape[0]

        self.fig2 = plt.figure(100)
        self.ax2 = self.fig2.add_subplot(1, 1, 1)

        self.rect = Rectangle((0, 0), 0, 0, alpha=0.5, color='g')

    def on_key(self, event):
        """Switch the figure-99 image between the available views."""
        print('you pressed', event.key, event.xdata, event.ydata)
        plt.figure(self.fig1.number)
        if event.key == u'c':
            plt.imshow(self.dat.sum(2).T, interpolation='nearest', aspect='auto')
            print("Total Counts")
        elif event.key == u'm':
            energyMatrix = np.tile(self.energy, (self.dat.shape[0], self.dat.shape[1], 1))
            print(energyMatrix.shape)
            # count-weighted mean energy bin per pixel
            intensity = (self.dat * energyMatrix).sum(2) / self.dat.sum(2)
            plt.imshow(intensity.T, interpolation='nearest', aspect='auto')
            print("Mean Bin")
        elif event.key == u'v':
            energyMatrix = np.tile(self.energy, (self.dat.shape[0], self.dat.shape[1], 1))
            binMean = (self.dat * energyMatrix).sum(2) / energyMatrix.sum(2)
            binMeanMatrix = np.tile(binMean, (self.dat.shape[0], 1, 1))
            # NOTE(review): this expression divides a 3-D array by a 2-D one
            # without a final sum over bins -- looks dimensionally suspect,
            # confirm against the intended std-dev formula.
            intensity = np.sqrt((((energyMatrix - binMeanMatrix) ** 2) * self.dat).astype(float) / self.dat.sum(2).astype(float))
            plt.imshow(intensity.T, interpolation='nearest', aspect='auto')
            print("Std Bin")
        # BUGFIX: removed stray statistics statements that followed here; they
        # referenced names never defined in this class (stats, ii, binArray,
        # datTemp) and raised NameError on every key press -- clearly a
        # copy/paste leftover from a statistics routine.

    def on_press(self, event):
        """Record the press corner of the selection."""
        self.x1, self.y1 = event.xdata, event.ydata
        print("Press: ", self.x1, self.y1)

    def on_release(self, event):
        """Finish the selection and plot the selected spectrum/spectra."""
        self.x2, self.y2 = event.xdata, event.ydata
        print("Release: ", self.x2, self.y2)

        # Clicks outside the axes report None coordinates; round(None) would
        # raise, so bail out early.
        if self.x1 is None or self.y1 is None or self.x2 is None or self.y2 is None:
            return

        # BUGFIX: int() added -- Python 2 round() returns a float, and float
        # array indices are rejected by modern numpy.
        x1 = int(round(self.x1))
        y1 = int(round(self.y1))
        x2 = int(round(self.x2))
        y2 = int(round(self.y2))

        # half-open pixel spans (renamed from xrange/yrange to stop shadowing
        # the builtin)
        xSpan = [min(x1, x2), max(x1, x2) + 1]
        ySpan = [min(y1, y2), max(y1, y2) + 1]

        # highlight the selection; drop any previous rectangle first
        try:
            if self.ax1.patches != []:
                self.ax1.patches.pop()
            self.rect = Rectangle((xSpan[0] - 0.5, ySpan[0] - 0.5), xSpan[1] - xSpan[0], ySpan[1] - ySpan[0], alpha=0.5, color='g')
            self.ax1.add_patch(self.rect)
            self.fig1.canvas.draw()
        except Exception:
            # drawing is best-effort; keep the original debug marker
            print("asdf")

        if (self.x1 == self.x2) and (self.y1 == self.y2):
            # single click: plot the single-pixel spectrum
            if (x1 <= self.width) and (x1 >= 0) and (y1 <= self.height) and (y1 >= 0):
                plt.figure(self.fig2.number)
                plt.clf()
                plt.plot(self.dat[x1, y1, :])
                plt.title("{}, {}, {}, {}".format(self.x1, self.x2, self.y1, self.y2))
                self.fig2.canvas.show()
        else:
            # drag: plot the mean spectrum over the selected region
            plt.figure(self.fig2.number)
            plt.clf()
            if (xSpan[0] == xSpan[1]) and (ySpan[0] != ySpan[1]):
                plt.plot(self.dat[xSpan[0], ySpan[0]:ySpan[1], :].mean(0))
            elif (xSpan[0] != xSpan[1]) and (ySpan[0] == ySpan[1]):
                plt.plot(self.dat[xSpan[0]:xSpan[1], ySpan[0], :].mean(0))
            else:
                plt.plot(self.dat[xSpan[0]:xSpan[1], ySpan[0]:ySpan[1], :].mean(0).mean(0))

            plt.title("x: %3.2f, %3.2f;y: %3.2f, %3.2f\n# Pixels: %d" % (self.x1, self.x2, self.y1, self.y2, (xSpan[1] - xSpan[0] - 1) * (ySpan[1] - ySpan[0] - 1)))
            self.fig2.canvas.show()

    def on_motion(self, event):
        # intentionally a no-op (was the placeholder expression 1+1)
        pass


#lowerBound = 10, upperBound = 200, yLower = 100, yUpper = 200, yLowerAboveCargo = 7, yUpperAboveCargo = 22):

def FindBasicZspecStart(filename, detectorList = np.array([31, 34, 35, 36]), lowerBound = 100, upperBound = 400):
    """Locate the scan start bin in a basic Zspec scan.

    Parameters:
        filename     : either a 3-D data array (assumed [time, detector, bin] --
                       TODO confirm) or a path loadable by ReadZspecBasicScanNumpy.
        detectorList : detectors used to build the search trace.
        lowerBound, upperBound : index window searched for the drop.

    Returns the first index inside the window where the trace falls below the
    midpoint threshold, or -1 if none is found.
    """
    # Duck-type the input: an array has .shape, a path string does not.
    # BUGFIX: narrowed the bare except so unrelated errors are not swallowed.
    try:
        filename.shape[0]
        dat = filename
    except AttributeError:
        (a, dat) = ReadZspecBasicScanNumpy(filename)

    # collapse to one trace: mean over bins, then max over the chosen detectors
    dat2 = dat[:, detectorList, :].mean(2).max(1)
    # threshold halfway between min and max inside the search window
    threshold = (dat2[lowerBound:upperBound].max() + dat2[lowerBound:upperBound].min()) / 2.0
    idx = np.arange(len(dat2))
    cut = (dat2 < threshold) & (idx > lowerBound) & (idx < upperBound)
    try:
        startBin = np.where(cut)[0][0]
    except IndexError:
        startBin = -1  # nothing below threshold inside the window
    return (startBin)

def FindBasicZspecStartVer2(filename, detectorList = np.array([31, 32, 33, 34, 35, 36, 37, 38, 39, 40]), lowerBound = 200, upperBound = 300, backgroundLower = 100, backgroundUpper = 150):
    """
        Find start bin.
        Works with Set 2.

        filename is either a 3-D data array (assumed [time, detector, bin] --
        TODO confirm) or a path loadable by ReadZspecBasicScanNumpy.  The trace
        is the mean over the listed detectors (ignoring the lowest bins); the
        start is the first index inside (lowerBound, upperBound) rising above
        25% of the peak over the background measured in
        [backgroundLower:backgroundUpper].  Returns -1 if no rise is found.
    """
    # Duck-type the input: an array has .shape, a path string does not.
    # BUGFIX: narrowed the bare except so unrelated errors are not swallowed.
    try:
        filename.shape[0]
        dat = filename
    except AttributeError:
        (a, dat) = ReadZspecBasicScanNumpy(filename)

    MINBIN = 6  # ignore the lowest-energy bins
    # collapse to one trace: mean over bins above MINBIN, then over detectors
    dat2 = dat[:, detectorList, MINBIN:].mean(2).mean(1)
    baseline = dat2[backgroundLower:backgroundUpper].mean()
    delta = dat2[lowerBound:upperBound].max() - baseline
    threshold = baseline + delta * 0.25  # 25% of the rise above baseline
    idx = np.arange(len(dat2))
    cut = (dat2 > threshold) & (idx > lowerBound) & (idx < upperBound)
    try:
        startBin = np.where(cut)[0][0] - 1  # -1 fudge factor to line up with cargo/rad images
    except IndexError:
        startBin = -1  # nothing above threshold inside the window
    return (startBin)


def FindCombinations(arrayIn):
    """Recursively enumerate all non-empty sub-combinations of arrayIn.

    arrayIn should be a (sliceable) list.  Each combination is returned as a
    list; for len(arrayIn) == 2 the third entry is arrayIn itself, matching
    the original behavior.

    BUGFIX: added base cases for length 0 and 1 -- previously a length-1 input
    recursed into an empty slice and produced garbage; also removed the debug
    print statements.
    """
    if len(arrayIn) == 0:
        return []
    if len(arrayIn) == 1:
        return [[arrayIn[0]]]
    if len(arrayIn) == 2:
        return ([[arrayIn[0]], [arrayIn[1]], arrayIn])

    # combinations not including the first element
    combos = FindCombinations(arrayIn[1:])
    combosAll = []
    combosAll.append([arrayIn[0]])
    for c in combos:
        combosAll.append(c)
        combosAll.append(c + [arrayIn[0]])
    return (combosAll)
        
        
def ReadCargoDataDescriptionFile(fullFilename):
    """Parse a tab-separated cargo description file into a dict of columns.

    The first line is a header and is skipped.  Each remaining line must have
    at least 8 tab-separated fields:
        config (int), cargo, targets, scanType, scanID, dataFile,
        rawImageFile, notes.

    Returns a dict mapping each field name to a list (one entry per data row).

    BUGFIX: the original opened the file in binary mode and chopped the last
    two characters of every non-final line (assuming CRLF endings), which
    corrupted the 'notes' field of LF-terminated files.  Line endings are now
    stripped explicitly with rstrip('\\r\\n').
    """
    with open(fullFilename, 'r') as fid:
        lines = fid.readlines()

    fieldNames = ('config', 'cargo', 'targets', 'scanType', 'scanID',
                  'dataFile', 'rawImageFile', 'notes')
    fileDescript = {}
    for name in fieldNames:
        fileDescript[name] = []

    # skip the header line
    for lineIn in lines[1:]:
        fields = lineIn.rstrip('\r\n').split('\t')
        fileDescript['config'].append(int(fields[0]))
        fileDescript['cargo'].append(fields[1])
        fileDescript['targets'].append(fields[2])
        fileDescript['scanType'].append(fields[3])
        fileDescript['scanID'].append(fields[4])
        fileDescript['dataFile'].append(fields[5])
        fileDescript['rawImageFile'].append(fields[6])
        fileDescript['notes'].append(fields[7])
    return (fileDescript)