##PBAR_Zspec_DetermineBestAlgorithm2ndSet_Ver2.py
#
# Modification of PBAR_Zspec_DetermineBestAlgorithm.py which was used to determine the 
# best algorithm on the first set of zspec data.
# Needs mlpy.
#
#   Loads data, makes corrections, makes nice arrays for training, 
#
#   4/2//2013, John Kwong

#import csv
#import matplotlib.pyplot as plt
import os, cPickle, copy
import numpy as np
import matplotlib.pyplot as plt
#import numpy.matlib
#import datetime
#import time
import PBAR_Zspec
reload(PBAR_Zspec)
from sklearn.feature_selection import SelectKBest, chi2, f_classif, f_oneway
from sklearn.linear_model import LogisticRegression
from sklearn.lda import LDA
from sklearn import cross_validation
from sklearn.metrics import precision_recall_fscore_support
from sklearn.preprocessing import scale
from scipy.optimize import curve_fit

#from mpl_toolkits.mplot3d import Axes3D
# import mlpy
#from scipy.optimize import curve_fit

reload(PBAR_Zspec)

def gauss_function(x, a, x0, sigma):
    """Un-normalized Gaussian: amplitude a, center x0, width sigma.

    Used below as the model for scipy.optimize.curve_fit on the
    histogrammed LDA-distance distributions.
    """
    exponent = -(x - x0)**2 / (2 * sigma**2)
    return a * np.exp(exponent)

# Set useful plot variables (repeated so long series of curves never run out)
plotColors = ['r', 'b', 'g', 'm', 'c', 'y', 'k'] * 10
# BUG FIX: the old list contained '_' and '|', which are matplotlib *marker*
# symbols, not line styles, and raise a ValueError when passed as a linestyle.
# Only the four valid line styles are kept.
lineStyles = ['-', '--', '-.', ':'] * 10
markerTypes = ['.', 'o', 'v', '^', '<', '>', '1', '2', '3', '4', 's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd']
markerTypes = markerTypes * 2

######################################
##  LOAD DATA AND CALCULATE STATS   ##
######################################

# Analysis parameters
binThreshold = 8     # the first binThreshold spectrum bins are zeroed out
numTrials = 50       # random train/test split trials per configuration
countStepSize = 25   # step between successive count-window lower edges
windowSize = 50      # width of each sliding count window
MaxCount = 375       # upper limit of the count-window scan

# Which zspec data collection to analyze (1 = first set, 2-4 = second-set variants)
setNum = 2

# SET THE DATASET LOCATION
# NOTE(review): any setNum outside 1-4 leaves basepath/infoFilename undefined
# and the script fails below with a NameError -- confirm setNum is always 1-4.

# First set
if setNum == 1:
    basepath = r'C:\Users\jkwong\Documents\Work\PBAR\data'
    infoFilename = os.path.join(basepath,'datasetSummaryOLD.txt')
# Second set
elif setNum == 2 or setNum == 3 or setNum == 4:
    # SECOND SET
    basepath = r'C:\Users\jkwong\Documents\Work\PBAR\data4\Mar-files'
    infoFilename = os.path.join(basepath,'DatasetSummary2ndSet.txt')


# LOAD LIST OF FILES, DATASET GROUPS, LIST OF GOOD/BAD DETECTORS
# The *2ndSet helpers return extra per-group metadata (widths, material)
# that the first-set helpers do not provide.
if setNum == 1:
    # Create list of datasets
    (filenameList, fullfilenameList) = \
                   PBAR_Zspec.GenerateDefaultDatasetFilenameList(basepath)
    # Create list of dataset groups
    (datasetGroups, datasetGroupsIndices) = \
                    PBAR_Zspec.GenerateDefaultDatasetGroupList(filenameList)
    # Create list of good/bad detectors
    (goodDetectorsList, badDetectorsList, goodDetectorsMask, badDetectorsMask) = \
                        PBAR_Zspec.GenerateDefaultDetectorList()
    (goodZspecMask, badZspecMask, goodZspecNameList, badZspecNameList, \
            goodZspecIndices, badZspecIndices) = PBAR_Zspec.ZspecDetectorLists()
elif setNum == 2 or setNum == 3 or setNum == 4:
    # Create list of datasets
    (filenameList, fullfilenameList) = \
                   PBAR_Zspec.GenerateDefaultDatasetFilenameList2ndSet(basepath)
    # Create list of dataset groups (includes shielding widths and material)
    (datasetGroups, datasetGroupsIndices, datasetGroupsWidth, datasetRawWidth, datasetMaterial) = \
                    PBAR_Zspec.GenerateDefaultDatasetGroupList2ndSet(filenameList)
    # Create list of good/bad detectors
    (goodDetectorsList, badDetectorsList, goodDetectorsMask, badDetectorsMask) = \
                        PBAR_Zspec.GenerateDefaultDetectorList2ndSet()
    (goodZspecMask, badZspecMask, goodZspecNameList, badZspecNameList, \
            goodZspecIndices, badZspecIndices) = PBAR_Zspec.ZspecDetectorLists2ndSet()

# LOAD DATASET SUMMARY
# Per-dataset description and acquisition-time/timestamp arrays, keyed by
# the order of filenameList (presumably -- verify against PBAR_Zspec).
(datasetDescription, datasetAcquisitionTime, \
    datasetTime, datasetTimeNum, datasetTimeStr) = \
    PBAR_Zspec.GetDatasetInformation(infoFilename, filenameList)

# LOAD ZSPEC DATA
print "Loading Data"
dat = PBAR_Zspec.ReadZspec(fullfilenameList)

# Load Radiography data
#(datRad, datRadZspec, radMap) = PBAR_Zspec.ReadRad(basepath)

# Read in the Calibration file (per-time gain shift values)
(calTimeNum, calGainShift) = PBAR_Zspec.LoadGainCalibration(os.path.join(basepath, 'GainCorrectionVer2.csv'))

# Generate extrapolated gain matrix at each dataset acquisition time
# misnomer - is actually the gain calibration bin
gainExtrapolated = PBAR_Zspec.ExtrapolateGain(calTimeNum, calGainShift, datasetTimeNum)

# Multiply the bin array with this to calibrate
# (normalizes the calibration bin back to nominal bin 100)
gainCorrection = 100.0/gainExtrapolated

# Xray Pulse Rate
if setNum == 1:
    # Pulse rate was always 60 Hz in the first set
    pulseRate = 60 * np.ones(len(dat))
elif setNum == 2 or setNum == 3 or setNum == 4:
    # Pulse rate is not always 60Hz unlike in the first set:
    # default 250 Hz, with the first dataset and ec76/ec84/ec85 at 60 Hz.
    # (filenameList.index raises ValueError if a name is missing.)
    filenameList = list(filenameList)
    pulseRate = np.ones(len(filenameList)) * 250.
    pulseRate[0] = 60.
    pulseRate[filenameList.index('ec76')] = 60.
    pulseRate[filenameList.index('ec84')] = 60.
    pulseRate[filenameList.index('ec85')] = 60.
    filenameList = np.array(filenameList)

# calculate the stats
#stats = PBAR_Zspec.CalculateStats(dat, datasetAcquisitionTime, np.arange(256).astype(float), gainCorrection, basepath, binThreshold, pulseRate)
#stats = PBAR_Zspec.CalculateStats(dat, datasetAcquisitionTime, np.arange(256).astype(float), gainCorrection, binThreshold, pulseRate)
# Interpolation version to lessen discrete binning effects
stats = PBAR_Zspec.CalculateStatsInterp(dat, datasetAcquisitionTime, np.arange(256).astype(float), gainCorrection, binThreshold, pulseRate)

## Save values to file
#PBAR_Zspec.WriteStats(stats, filenameList, outputDir, 'd')

######################################################
##  MAKE ARRAYS FOR FEATURES SELECTION FUNCTION


# Detectors worth including (keep only good detectors with index 41-119;
# presumably the usable zspec channel range -- verify)
detectorList = goodDetectorsList[(goodDetectorsList >= 41) & (goodDetectorsList < 120)]

#detectorList = goodDetectorsList[(goodDetectorsList >= 70) & (goodDetectorsList < 71)]


# The dict containing the collapsed stuff will contain only these groups of data sets
#groupNamesList = np.array(('PbALL', 'Pb', 'Fe', 'Al', 'PbNOT', \
#                           'PbALLALL', 'PbNOTNOT', \
#                           'Pb3Fe', 'Pb3Al', 'Pb4Fe', 'Pb4Al', 'Pb5Fe', 'Pb5Al'))
#                           
#groupNamesList = np.array(('PbALL', 'Pb', 'Fe', 'Al', 'PbNOT', \
#                           'PbALLALL', 'PbNOTNOT', 'PbLS', 'FeLS', 'AlLS', \
#                           'Pb3Fe', 'Pb3Al', 'Pb4Fe', 'Pb4Al'))

if setNum == 1:  # set1 doesn't have Pb5Al or Pb5Fe
    groupNamesList = np.array(('PbALL', 'Pb', 'Fe', 'Al', 'PbNOT', \
                               'PbALLALL', 'PbNOTNOT', \
                               'Pb3Fe', 'Pb3Al', 'Pb4Fe', 'Pb4Al'))
elif setNum == 2 or setNum == 3 or setNum == 4:
    # second set additionally has the _0/_1 subset groups (subsets 2a and 2b)
    groupNamesList = np.array(('PbALL', 'Pb', 'Fe', 'Al', 'PbNOT', \
                           'PbALLALL', 'PbNOTNOT', \
                           'Pb3Fe', 'Pb3Al', 'Pb4Fe', 'Pb4Al', 'Pb5Fe', 'Pb5Al', \
                           'PbALL_0', 'PbALL_1', 'PbNOT_0', 'PbNOT_1'))

# Make a dictionary of dictionaries containing a values from a subset of the detectors.
# statsCollapsed[<dataset group name>][<statistics name>] = 
#        values for all detectors specified for all datasets in the group
#
statsCollapsed = PBAR_Zspec.CreateStatsCollapsed(stats, groupNamesList, datasetGroupsIndices, detectorList)

# This defines the columns of the feature matrix (X)
#statsCollapseListALL = np.array(( \
#    'binSTD_binMean', \
#    'binMean_g1', 'binSTD_g1', \
##    'binMean', 'binSTD', \
#    'binMean_q50_g1', 'q_range_ratio',  \
#    'multibin_20_ratio_g1', 'multibin_10_ratio_g1'))

# Earlier, larger feature list kept for reference.  It used to be assigned
# and then immediately overwritten by the active definition below (a dead
# store), so it is now commented out like the other historical variants.
#statsCollapseListALL = np.array(( \
#    'binSTD_binMean', \
##    'binMean', 'binSTD', \
#    'binMean_g1', 'binSTD_g1', \
#    'binMean_q50_g1', 'q_range_ratio',  \
#    'multibin_20_ratio_g1', 'multibin_10_ratio_g1', \
#    'q_range_g1', 'q_50_g1',
#    'binMoment3', 'binMoment4', 'binSkew', 'binKur'
#    ))

# Active feature list: these statistic names become the columns of X
statsCollapseListALL = np.array(( \
    'binSkew', 'binKur', \
    'binMean_g1', 'binSTD_g1', \
#    'q_range_ratio',  \
    'multibin_20_ratio_g1', \
#    'q_range_g1', 'q_50_g1',
    ))

# Make all combinations of feature indices, then keep only the sorted
# 3- and 4-feature combinations (sorting makes identical combos compare equal)
comboListTemp = PBAR_Zspec.FindCombinations(list(np.arange(len(statsCollapseListALL))))
comboList = [list(np.sort(c)) for c in comboListTemp if 3 <= len(c) <= 4]

# Make the stats matrix that contains all the features we want
# Only include data from these groups for the test/training matrix
# groupNamesExportList = np.array(('PbNOT', 'PbALL'))

# setNum 3/4 swap in the non-lead subsets (2a or 2b) while keeping all Pb data
if setNum == 1 or setNum == 2:
    groupNamesExportList = np.array(('PbNOT', 'PbALL'))
elif setNum == 3:
    groupNamesExportList = np.array(('PbNOT_0', 'PbALL')) # assumes that the Pb is fine in 2a and 2b

elif setNum == 4:
    groupNamesExportList = np.array(('PbNOT_1', 'PbALL')) # assumes that the Pb is fine in 2a and 2b

# - statsMatrix contains the features and truth value
# - statsCollapsedMatrix, dict of two matrices, one containing only Pb, other containing not Pb
# statsMatrix, vertical concatenation of the two matrices in statsCollapsedMatrix; the last colum is the target value (1 = lead)
# make matrix containing only the counts
# - countCollapsedMatrix - same as statsCollapsedMatrix except only contain counts
# - countMatrix - same as statsMatrix but only contains counts
(statsCollapsedMatrix, statsMatrix) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, statsCollapseListALL, groupNamesExportList)
(countCollapsedMatrix, countMatrix) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, ['count'], groupNamesExportList)
# keep only the count column, dropping the appended target column
countMatrix = countMatrix[:,0]

## make matrix containing more datasets
#(statsCollapsedMatrixIncludingLowStats, statsMatrixIncludingLowStats) = \
#    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, statsCollapseListALL, np.array(('PbNOTNOT', 'PbALLALL')))
## the counts
#(countCollapsedMatrixIncludingLowStats, countMatrixIncludingLowStats) = \
#    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, ['count'], np.array(('PbNOTNOT', 'PbALLALL')))

if setNum == 2 or setNum == 3 or setNum == 4:
    # for the first subset of the second set of zspec data (2a)
    (statsCollapsedMatrix_0, statsMatrix_0) = \
        PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, statsCollapseListALL, np.array(('PbNOT_0', 'PbALL_0')))
    (countCollapsedMatrix_0, countMatrix_0) = \
        PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, ['count'], np.array(('PbNOT_0', 'PbALL_0')))
    countMatrix_0 = countMatrix_0[:,0]    
    
    # for the second subset of the second set of zspec data (2b)
    (statsCollapsedMatrix_1, statsMatrix_1) = \
        PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, statsCollapseListALL, np.array(('PbNOT_1', 'PbALL_1')))
    (countCollapsedMatrix_1, countMatrix_1) = \
        PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, ['count'], np.array(('PbNOT_1', 'PbALL_1')))
    countMatrix_1 = countMatrix_1[:,0]



# PERFORM FEATURE SELECTION
print('Perform Feature Selection')

# feature and target matrices for all counts per pulse
# (last column of statsMatrix is the target: 1 = lead)
X_AllTransmission = statsMatrix[:,:-1]
y_AllTransmission = statsMatrix[:,-1]

# create classifer objects, these will be trained and then copied to separate container
classifier = {}
classifier['lda'] = LDA()
classifier['lr'] = LogisticRegression()
classifierKeys = classifier.keys()

# Create containers for the select k-best objects and trained classifers
# structure: <count range index> <number features>
classifiersTrialAllAll = []
scoresTrialAllAll = []
classifiersWholeAllAll = []
scoresWholeAllAll = []
topFeaturesIndicesAllAll = []
topFeatureScoresAllAll = []
# cap the feature-subset scan at 6 features (or fewer if the list is shorter);
# the previous unconditional "maximumFeatures = len(...)" dead store is removed
maximumFeatures = min(6,len(statsCollapseListALL))

# sliding count windows across [0, MaxCount), plus a few wider hand-picked ranges
countRangesList = [(i, i+windowSize) for i in xrange(0, MaxCount - windowSize, countStepSize)]

countRangesList.append([50, 150])
countRangesList.append([150, 250])
countRangesList.append([250, 350])
countRangesList.append([50, 300])
countRangesList.append([50, 350])

# go through list of count ranges; for each range, scan the number of
# SelectKBest features and evaluate LDA / logistic regression by repeated
# random train/test splits plus one fit on the whole (unsplit) window
for (countRangeIndex, countRange) in enumerate(countRangesList):

    classifiersTrialAll = []
    scoresTrialAll = []
    classifiersWholeAll = []
    scoresWholeAll = []

    topFeaturesIndicesAll = []
    topFeatureScoresAll = []
    
    print('Count Range %d, %d' %(countRange[0], countRange[1]))
    # Cut in count: keep only datasets whose count falls inside this window
    cutCount = (countMatrix > countRange[0]) & (countMatrix < countRange[1])
    
    # define feature target matrices
    X = statsMatrix[cutCount,:][:,:-1]
#    X_scaled = scale(X)  # remove offset, divide by std
    X_scaled = copy.copy(X)  # no scaling of the features
    
    y = statsMatrix[cutCount,:][:,-1]
    
    # cycle through number of features from 1 to maximum number of features
    trainSize = 0.75
    for numFeaturesKeep in xrange(1, maximumFeatures+1):    
        print("Number of features %d" %numFeaturesKeep)
#        ch2 = SelectKBest(chi2, k=numFeaturesKeep)
        # univariate ANOVA F-test feature ranking
        ch2 = SelectKBest(f_classif, k=numFeaturesKeep)
        ch2.fit(X_scaled, y)
        topFeaturesIndices = ch2.get_support(indices = True)
        topFeatureScores = np.array(ch2.scores_)[topFeaturesIndices]
        # feature indices ordered by descending score
        orderorder = np.argsort(topFeatureScores)[-1::-1]
        
        topFeaturesIndicesAll.append(topFeaturesIndices)   
        topFeatureScoresAll.append(topFeatureScores)
        
        # print the ranked features with their scores
        stringstring = ''
        for ooIndex, oo in enumerate(orderorder):
#            print(ooIndex)
            featureIndex = topFeaturesIndices[oo]
            stringstring = stringstring + '%d) %s: %3.3f; ' %(ooIndex, statsCollapseListALL[featureIndex], topFeatureScores[oo]) 
        print(stringstring)

        # initialize containers
        scoresTrial = {}
        scoresWhole = {}
        
        classifiersTrial = []
        classifiersWhole = {}

        for key in classifierKeys:
            scoresTrial[key] = np.zeros((numTrials, 5)).astype(float)
            scoresWhole[key] = np.zeros(5).astype(float)
            
        # run trials
        for numTrial in xrange(numTrials):
            # perform random split
            X_train, X_test, y_train, y_test = cross_validation.train_test_split(X_scaled[:,topFeaturesIndices], y, train_size = trainSize)
            # BUG FIX: allocate a fresh dict for every trial.  Previously a
            # single dict (created once per numFeaturesKeep) was appended on
            # every trial, so classifiersTrial held numTrials references to
            # the same object, i.e. only the LAST trial's classifiers.
            classifiers = {}
            for key in classifierKeys:
                classifier[key].fit(X_train, y_train)
                pred = classifier[key].predict(X_test)            
                p, r, fbeta_score, support = precision_recall_fscore_support(y_test, pred, average = 'micro')
                classifiers[key] = copy.deepcopy(classifier[key])
                # save precision, recall, f1, and support, success
                scoresTrial[key][numTrial,:] = np.array([p, r, fbeta_score, support,  sum(y_test == pred) / float(len(pred))])
            classifiersTrial.append(classifiers)

        # report mean scores over the random-split trials
        for key in classifierKeys:
            scoresMean = scoresTrial[key].mean(0)
            scoresSTD = scoresTrial[key].std(0)
            print('Split, %s, Precision %3.3f, Recall %3.3f, f %3.3f, success %3.3f' %(key, scoresMean[0], scoresMean[1], scoresMean[2], scoresMean[4] ))
            
        # run classifer on all data points in the count range (no train/test split)
        for key in classifierKeys:
            X_train = X_scaled[:,topFeaturesIndices]
            X_test = X_train
            y_train = y
            y_test = y
            classifier[key].fit(X_train, y_train)
            pred = classifier[key].predict(X_test) 
            p, r, fbeta_score, support = precision_recall_fscore_support(y_test, pred, average = 'micro')
            classifiersWhole[key] = copy.deepcopy(classifier[key])
            # save precision, recall, f1, and support, success
            scoresWhole[key] = np.array([p, r, fbeta_score, support,  sum(y_test == pred) / float(len(pred))])
            print('Whole, %s, Precision %3.3f, Recall %3.3f, f %3.3f, success %3.3f' %(key, p, r, fbeta_score, sum(y_test == pred) / float(len(pred)) ))
            
        # dump stuff into container for each number-of-features cycle
        scoresTrialAll.append(scoresTrial)
        scoresWholeAll.append(scoresWhole)
        classifiersTrialAll.append(classifiersTrial)
        classifiersWholeAll.append(classifiersWhole)
    scoresTrialAllAll.append(scoresTrialAll)
    scoresWholeAllAll.append(scoresWholeAll)
    classifiersTrialAllAll.append(classifiersTrialAll)
    classifiersWholeAllAll.append(classifiersWholeAll)
    topFeaturesIndicesAllAll.append(topFeaturesIndicesAll)
    topFeatureScoresAllAll.append(topFeatureScoresAll)
    print(' ')


# DECIDE ON THE MOST SUCCESSFUL FEATURES AND THEN TRAIN ON THESE

# Select a count range and number of features
# (manually chosen indices into the scan above -- TODO: pick programmatically)
countRangeIndex = 2
numberFeaturesIndex = 1

# whole-data LDA classifier and its selected feature indices for that choice
lda = classifiersWholeAllAll[countRangeIndex][numberFeaturesIndex]['lda']
topFeaturesIndices = topFeaturesIndicesAllAll[countRangeIndex][numberFeaturesIndex]
countRange = countRangesList[countRangeIndex]

#cutCount = (countMatrix > countRange[0]) & (countMatrix < countRange[1])
# apply the chosen classifier to ALL datasets (not just the training window)
X_All = statsMatrix[:,:-1]
y_All = statsMatrix[:,-1]
pred = lda.predict(X_All[:,topFeaturesIndices])
count_All = countMatrix

# Project features into LDA space using the fitted centering (xbar_) and
# scalings_ attributes of the old sklearn LDA implementation.
# NOTE(review): X_All_transformed2 is computed by the exact same expression
# as X_All_transformed -- the duplicate appears redundant; confirm intent.
X_All_transformed = np.dot(X_All[:,topFeaturesIndices] - lda.xbar_, lda.scalings_)
X_All_transformed2 = np.dot(X_All[:,topFeaturesIndices] - lda.xbar_, lda.scalings_)
X_All_transformed3 = np.dot(X_All_transformed2, lda.coef_.T) + lda.intercept_



# FIT POLYNOMIAL ON THE transformed values

# FIT GAUSSIANS TO THE PB-ALL BAND
# Count bins get finer windows at low count where the distributions change
# fastest: width 5 below 50, width 10 from 50-150, width 50 from 150-375.

fitWindowSize = 5
fitCountStart = 0
fitMaxCount = 50
fitCountStepSize = 2
fitCountBinEdgesList0 = [np.array([i, i+fitWindowSize]) for i in xrange(fitCountStart, fitMaxCount - fitWindowSize, fitCountStepSize)]

fitWindowSize = 10
fitCountStart = 50
fitMaxCount = 150
fitCountStepSize = 5
fitCountBinEdgesList1 = [np.array([i, i+fitWindowSize]) for i in xrange(fitCountStart, fitMaxCount - fitWindowSize, fitCountStepSize)]

fitWindowSize = 50
fitCountStart = 150
fitMaxCount = 375
fitCountStepSize = 25
fitCountBinEdgesList2 = [np.array([i, i+fitWindowSize]) for i in xrange(fitCountStart, fitMaxCount - fitWindowSize, fitCountStepSize)]

# concatenated list of [low, high) count-bin edges, and their centers
fitCountBinEdgesList = fitCountBinEdgesList0 + fitCountBinEdgesList1 + fitCountBinEdgesList2

fitCountBinCentersList = np.array([np.mean(b) for b in fitCountBinEdgesList])

#fitParamsPbAll = np.zeros( (len(fitCountBinEdgesList), 3))
#fitParamsPbNot = np.zeros( (len(fitCountBinEdgesList), 3))

fitParams  = {}
histCounts = {}
# Calculate the distance quantity

#dist0Best = np.sum(np.matlib.repmat(wBEST, statsMatrix.shape[0], 1) * \
#            statsMatrix[:,comboMatch], axis = 1)

# first component of the LDA-transformed features, used as the 1-D
# "distance" whose per-count-bin distribution is fit with Gaussians below
dist0Best = X_All_transformed2.T[0]
            
# histogram binning for the distance values: unit-width bins on [-20, 20)
distBinedges = np.arange(-20, 20, 1)
distBincenters = (distBinedges[0:-1] + distBinedges[1:])/2.0


# For each exported group and each count bin, histogram the LDA distance and
# fit a Gaussian; fitParams[group] rows are (amplitude, mean, sigma).
for groupNameIndex, groupName in enumerate(groupNamesExportList):
    fitParams[groupName] = np.zeros( (len(fitCountBinEdgesList), 3))
    histCounts[groupName] = []
    
    for (countBinEdgesIndex, countBinEdges) in enumerate(fitCountBinEdgesList):    
        # datasets whose count falls in this bin
        cutt = (count_All >= countBinEdges[0]) & (count_All < countBinEdges[1])

        # Restrict to lead / non-lead truth depending on the group name.
        # BUG FIX: startswith also covers the subset groups (PbNOT_0,
        # PbNOT_1, PbALL_0, ...); the old explicit elif chain had no branch
        # for PbNOT_1, so the setNum==4 export got no truth cut at all.
        if groupName.startswith('PbALL'):
            cutt = cutt & y_All.astype(bool)   # is lead (true)
        elif groupName.startswith('PbNOT'):
            cutt = cutt & ~y_All.astype(bool)  # is not lead

        # make histogram of the values
        counts, binedges = np.histogram(dist0Best[cutt], distBinedges)
        histCounts[groupName].append(counts)
        
        # starting guess: half-max amplitude, empirical mean and std
        startingParam = [0.5 * max(counts), np.mean(dist0Best[cutt]), np.std(dist0Best[cutt])]
        try:
            popt, pcov = curve_fit(gauss_function, distBincenters, counts, p0 = startingParam)
        except (RuntimeError, ValueError, TypeError):
            # curve_fit raises RuntimeError on non-convergence; flag the bin
            # with sentinel parameters instead of aborting the whole scan
            # (narrowed from a bare except, which also swallowed Ctrl-C)
            print("Gaussian, Bad Fit")
            popt = [-1, -1, -1]
        fitParams[groupName][countBinEdgesIndex,:] = np.array(popt)


# Fit polynomials (orders 0..29) to the Gaussian mean and sigma as functions
# of count, for each group, over three subsets of the count axis:
#   'all'    - every count-bin center
#   'tail'   - centers above count 150 only
#   'extend' - all centers plus extra points at xTail predicted by the
#              order-polyOrderList[polyOrderTailIndex] 'tail' fit
pfit = {}
pfitError = {}
polyOrderList = np.arange(0, 30, 1)
# extrapolation abscissae beyond the measured count range
xTail = [330, 340, 350, 360, 370, 380, 390, 400, 410, 420]

# index into polyOrderList for the tail fit used by 'extend' (order 4)
polyOrderTailIndex = 4
# NOTE(review): this x is overwritten inside the loop below (dead store)
x = np.array(list(fitCountBinCentersList) + xTail)

for varIndex, varName in enumerate(['mean', 'sigma']):
    pfit[varName] = {}
    pfitError[varName] = {}    
    for groupNameIndex, groupName in enumerate(groupNamesExportList):  # 'PbNOT', 'PbALL'
        pfit[varName][groupName] = {}
        pfitError[varName][groupName] = {}    
        # 'extend' relies on the 'tail' fits already being stored, so the
        # subset order here must keep 'tail' before 'extend'
        for subsetNameIndex, subsetName in enumerate(['all', 'tail', 'extend']):        
            pfit[varName][groupName][subsetName] = []
            pfitError[varName][groupName][subsetName] = []
            for polyOrderIndex, polyOrder in enumerate(polyOrderList):
                #varIndex+1 gives the fit parameter index
                # 1 - mean
                # 2 - sigma
                if subsetName == 'all':
                    cut = fitCountBinCentersList > -1
                    x = fitCountBinCentersList[cut]
                    y = fitParams[groupName][cut,varIndex+1]
                    xarray = np.arange(0, 325)
                elif subsetName == 'tail':
                    cut = fitCountBinCentersList > 150
                    x = fitCountBinCentersList[cut]
                    y = fitParams[groupName][cut,varIndex+1]
                    xarray = np.arange(150, 325)
                elif subsetName == 'extend':
                    x = np.array(list(fitCountBinCentersList) + xTail)
                    cut = x > -1
                    y = np.array( list(fitParams[groupName][:,varIndex+1]) + list(np.polyval(pfit[varName][groupName]['tail'][polyOrderTailIndex], xTail))  )    # add extra tail points
                    xarray = np.arange(0, 325)
                
                pfit[varName][groupName][subsetName].append(np.polyfit(x, y, polyOrder))
                
                # get the interpolation and polynomial extrapolations, and
                # score the fit by mean squared deviation from the interpolant
                yInterp = np.interp(xarray, fitCountBinCentersList, fitParams[groupName][:,varIndex+1])
                yPoly = np.polyval(pfit[varName][groupName][subsetName][polyOrderIndex], xarray)
                
                pfitError[varName][groupName][subsetName].append( np.sum( (yInterp - yPoly)**2 ) / float(len(yPoly)))


# WRITE RESULTS TO FILE

datOut = {}

# Names of module-level variables to persist.  Names that were never created
# in this run (e.g. set-1-only or legacy variables such as wMeanAllAll) are
# skipped with a message.
datOutCopyList = \
    ['calTimeNum', 'badDetectorsMask', 'goodZspecMask','datasetAcquisitionTime','binThreshold', \
    'stats', 'datasetGroups', 'filenameList', 'calGainShift', 'goodDetectorsMask',
    'datasetMaterial', 'datasetDescription', 'goodZspecNameList', 'datasetTimeNum',
    'datasetTimeStr', 'datasetGroupsWidth', 'datasetRawWidth', 'goodDetectorsList',
    'badZspecMask', 'goodZspecIndices', 'gainExtrapolated', 'dat', 'badZspecNameList',
    'gainCorrection', 'badZspecIndices', 'badDetectorsList', 'datasetTime', 'datasetGroupsIndices', 'pulseRate', \
    'detectorList','groupNamesList','statsCollapsed','statsCollapseListALL','groupNamesExportList', \
    'comboListTemp','statsCollapsedMatrix','statsMatrix','countCollapsedMatrix','countMatrix', \
    'statsCollapsedMatrixIncludingLowStats', 'statsMatrixIncludingLowStats','countCollapsedMatrixIncludingLowStats', \
    'countMatrixIncludingLowStats','numTrials','countStepSize','windowSize','MaxCount',\
    'wMeanAllAll','successRateMeanAll','specificityMeanAll','sensitivityMeanAll', \
    'successRate0MeanAll','specificity0MeanAll','sensitivity0MeanAll', \
    'scoresTrialAllAll', 'scoresWholeAllAll', 'classifiersTrialAllAll', 'classifiersWholeAllAll', \
    'topFeaturesIndicesAllAll', 'topFeatureScoresAllAll', 'statsCollapsedMatrix_0', \
    'statsMatrix_0', 'countCollapsedMatrix_0', 'countMatrix_0', 'statsCollapsedMatrix_1', \
    'statsMatrix_1', 'countCollapsedMatrix_1', 'countMatrix_1', 'pfitError', 'pfit', \
    'countRangesList', 'classifiersWholeAllAll', 'topFeaturesIndicesAllAll'
    ]


# BUG FIX: copy via a direct globals() lookup instead of exec inside a bare
# try/except, which needlessly ran dynamic code and swallowed every error
# (including KeyboardInterrupt), not just the intended missing-name case.
_moduleVars = globals()
for d in datOutCopyList:
    if d in _moduleVars:
        datOut[d] = _moduleVars[d]
    else:
        print('%s does not exist. Skipping.' %d)

fullFilename = os.path.join(basepath, 'zspec%dSet.dat' %setNum)
with open(fullFilename ,'wb') as fid:
    print('Writing %s' %fullFilename)
    # protocol 2 keeps the pickle readable by other Python 2 tools
    cPickle.dump(datOut, fid, protocol = 2)

#del datOut