##PBAR_Zspec_DetermineBestAlgorithm2ndSet.py
#
# Modification of PBAR_Zspec_DetermineBestAlgorithm.py which was used to determine the 
# best algorithm on the first set of zspec data.
# Needs mlpy.
#
#   Loads data, makes corrections, makes nice arrays for training, 
#
#
#   4/2//2013, John Kwong

#import csv
#import matplotlib.pyplot as plt
import os, cPickle, copy
import numpy as np
#import numpy.matlib
#import datetime
#import time
import PBAR_Zspec
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.lda import LDA
from sklearn import cross_validation
from sklearn.metrics import precision_recall_fscore_support


#from mpl_toolkits.mplot3d import Axes3D
# import mlpy
#from scipy.optimize import curve_fit

# Re-import PBAR_Zspec so edits made to the module during an interactive
# session take effect (Python 2 builtin reload).
reload(PBAR_Zspec)


# Scatter each collapsed feature against count, lead (red) vs no-lead (black).
# NOTE(review): this loop runs before statsCollapseListALL / statsMatrix /
# countMatrix are built below, and the matplotlib import is commented out at
# the top of the file -- it appears to rely on an interactive session where
# those names already exist. Confirm intended placement.
for featureIndex in xrange(len(statsCollapseListALL)):
    plt.figure()
    plt.grid()

    # datasets without lead (target column == 0) in black
    cut = statsMatrix[:,-1] == 0
    plt.plot(countMatrix[cut], statsMatrix[cut,featureIndex], '.k', alpha = 0.5)

    # datasets with lead (target column == 1) in red
    cut = statsMatrix[:,-1] == 1
    plt.plot(countMatrix[cut], statsMatrix[cut,featureIndex], '.r', alpha = 0.5)

    # scale the y-axis to twice the mean feature value of the lead points
    # that fall in the 100-300 count window
    temp = statsMatrix[cut,featureIndex]
    cut2 = (countMatrix[cut] > 100) & (countMatrix[cut] < 300)

    # was bare pylab-style axis()/xlabel()/ylabel(), which are undefined names
    # under the module-style "plt." usage above -- qualify them with plt.
    plt.axis((0, 400, 0, 2*temp[cut2].mean()))

    plt.xlabel('Count')
    plt.ylabel(statsCollapseListALL[featureIndex])
    if savePlots:
        filename = os.path.join(plotPath, '%s_Count_Set2_Interp.png' %statsCollapseListALL[featureIndex])
        plt.savefig(filename)
def gauss_function(x, a, x0, sigma):
    """Gaussian a * exp(-(x - x0)^2 / (2 * sigma^2)).

    Parameters: x is the evaluation point (scalar or numpy array, applied
    elementwise), a the amplitude, x0 the center, sigma the width.
    Returns the Gaussian value(s) with the same shape as x.
    """
    return a*np.exp(-(x-x0)**2/(2*sigma**2))

# Pools of plot styles, repeated so indexing/cycling never runs off the end.
plotColors = list('rbgmcyk') * 10
lineStyles = ['-', '-.', ':', '_', '|'] * 10
markerTypes = list('.ov^<>1234sp*hH+xDd') * 2

######################################
##  LOAD DATA AND CALCULATE STATS   ##
######################################

# Set dataset location
basepath = r'C:\Users\jkwong\Documents\Work\PBAR\data4\Mar-files'
infoFilename = os.path.join(basepath,'DatasetSummary2ndSet.txt')
# NOTE(review): setName says '1st' but the summary file above is for the
# 2nd set -- confirm which is intended.
setName = '1st'

#basepath = r'C:\Users\jkwong\Documents\Work\PBAR\data4\Mar-files'
#infoFilename = os.path.join(basepath,'DatasetSummary2ndSet.txt')
#setName = '1st'


# Create list of datasets (short names and full file paths)
(filenameList, fullfilenameList) = \
               PBAR_Zspec.GenerateDefaultDatasetFilenameList2ndSet(basepath)
# Create list of dataset groups
(datasetGroups, datasetGroupsIndices,datasetGroupsWidth, datasetRawWidth, datasetMaterial) = \
                PBAR_Zspec.GenerateDefaultDatasetGroupList2ndSet(filenameList)
# Create list of good/bad detectors (index lists plus boolean masks)
(goodDetectorsList, badDetectorsList, goodDetectorsMask, badDetectorsMask) = \
                    PBAR_Zspec.GenerateDefaultDetectorList2ndSet()

# Same good/bad split for the zspec detectors: masks, name lists, indices
(goodZspecMask, badZspecMask, goodZspecNameList, badZspecNameList, \
        goodZspecIndices, badZspecIndices) = PBAR_Zspec.ZspecDetectorLists2ndSet()
# Load summary data
#infoFilename = basepath + '\\' + 'datasetSummaryOLD.txt'

# Per-dataset metadata: description, acquisition time, and the timestamp in
# several representations
(datasetDescription, datasetAcquisitionTime, \
    datasetTime, datasetTimeNum, datasetTimeStr) = \
    PBAR_Zspec.GetDatasetInformation(infoFilename, filenameList)

# Load Zspec data
print "Loading Data"
dat = PBAR_Zspec.ReadZspec(fullfilenameList)

# Load Radiography data
#(datRad, datRadZspec, radMap) = PBAR_Zspec.ReadRad(basepath)

# Read in the Calibration file (gain shift vs. time)
(calTimeNum, calGainShift) = PBAR_Zspec.LoadGainCalibration(os.path.join(basepath, 'GainCorrectionVer2.csv'))

# Generate extrapolated gain matrix at each dataset's acquisition time
gainExtrapolated = PBAR_Zspec.ExtrapolateGain(calTimeNum, calGainShift, datasetTimeNum)

# multiply the bin array with this to calibrate
# (an extrapolated gain of 100 gives a correction factor of exactly 1)
gainCorrection = 100.0/gainExtrapolated

# Threshold bin: the first "binThreshold" bins are zeroed out downstream.
binThreshold = 8

# Pulse rate varies within this set (unlike the uniform 60 Hz of the first
# set): 250 Hz by default, 60 Hz for the first dataset and for ec76/84/85.
filenameList = list(filenameList)
pulseRate = 250. * np.ones(len(filenameList))
pulseRate[0] = 60.
for sixtyHzName in ('ec76', 'ec84', 'ec85'):
    pulseRate[filenameList.index(sixtyHzName)] = 60.

filenameList = np.array(filenameList)

# calculate the per-detector statistics for every dataset
stats = PBAR_Zspec.CalculateStats(dat, datasetAcquisitionTime, np.arange(256).astype(float), gainCorrection, binThreshold, pulseRate)


## Save values to file
#PBAR_Zspec.WriteStats(stats, filenameList, outputDir, 'd')

#########################################################
##   MAKE NICE ARRAYS TO PUT INTO LEARNING FUNCTIONS   ##
#########################################################

# Detectors worth including: good detectors numbered strictly between 40 and 120
detectorList = goodDetectorsList[(goodDetectorsList > 40) & (goodDetectorsList < 120)]

# The collapsed-stats dict built below contains only these dataset groups
groupNamesList = np.array([
    'PbALL', 'Pb', 'Fe', 'Al', 'PbNOT',
    'PbALLALL', 'PbNOTNOT',
    'Pb3Fe', 'Pb3Al', 'Pb4Fe', 'Pb4Al', 'Pb5Fe', 'Pb5Al',
    'PbALL_0', 'PbALL_1', 'PbNOT_0', 'PbNOT_1'])

# Dictionary of dictionaries holding values from the chosen detector subset:
# statsCollapsed[<dataset group name>][<statistics name>] =
#        values for all specified detectors, for all datasets in the group
statsCollapsed = PBAR_Zspec.CreateStatsCollapsed(stats, groupNamesList, datasetGroupsIndices, detectorList)

# Names of the collapsed statistics that become the columns of the feature
# matrix (X), in column order.
statsCollapseListALL = np.array([
    'binSTD_binMean',
    'binMean_g1', 'binSTD_g1',
    'binMean_q50_g1', 'q_range_ratio',
    'multibin_20_ratio_g1', 'multibin_10_ratio_g1'])

# Only data from these groups goes into the test/training matrix
# (PbNOT first so the no-lead rows come before the lead rows).
groupNamesExportList = np.array(['PbNOT', 'PbALL'])

# Make every combination of feature-column indices
comboListTemp = PBAR_Zspec.FindCombinations(list(np.arange(len(statsCollapseListALL))))

# Keep only combos of 3 or 4 features, stored sorted.
# (was: bitwise & between comparisons -- a precedence trap -- and an
#  unnecessary list(np.sort(...)) round trip where sorted() suffices)
comboList = []
for c in comboListTemp:
    if 3 <= len(c) <= 4:
        comboList.append(sorted(c))  # make sure it's sorted before appending

# make the stats matrix that contains all the features we want
#
# - statsMatrix contains the features and truth value
# - statsCollapsedMatrix, dict of two matrices, one containing only Pb, other containing not Pb
# - statsMatrix, vertical concatenation of the two matrices in statsCollapsedMatrix;
#   the last column is the target value (1 = lead)
(statsCollapsedMatrix, statsMatrix) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, statsCollapseListALL, groupNamesExportList)

# make matrix containing only the counts
# - countCollapsedMatrix - same as statsCollapsedMatrix except only contain counts
# - countMatrix - same as statsMatrix but only contains counts
(countCollapsedMatrix, countMatrix) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, ['count'], groupNamesExportList)
# keep only the first column (the counts), dropping the target column
countMatrix = countMatrix[:,0]

# for the first subset of the second set of zspec data (2a)
(statsCollapsedMatrix_0, statsMatrix_0) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, statsCollapseListALL, np.array(('PbNOT_0', 'PbALL_0')))
(countCollapsedMatrix_0, countMatrix_0) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, ['count'], np.array(('PbNOT_0', 'PbALL_0')))
countMatrix_0 = countMatrix_0[:,0]

# for the second subset of the second set of zspec data (2b)
(statsCollapsedMatrix_1, statsMatrix_1) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, statsCollapseListALL, np.array(('PbNOT_1', 'PbALL_1')))
(countCollapsedMatrix_1, countMatrix_1) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, ['count'], np.array(('PbNOT_1', 'PbALL_1')))
countMatrix_1 = countMatrix_1[:,0]


# make matrix containing more datasets (also includes the low-statistics groups)
(statsCollapsedMatrixIncludingLowStats, statsMatrixIncludingLowStats) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, statsCollapseListALL, np.array(('PbNOTNOT', 'PbALLALL')))
# the counts
(countCollapsedMatrixIncludingLowStats, countMatrixIncludingLowStats) = \
    PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, ['count'], np.array(('PbNOTNOT', 'PbALLALL')))

# LDA trial / count-window configuration
numTrials = 50          # random train/test splits per feature combo
countStepSize = 50      # spacing between window start points
windowSize = 100        # width of each count window
MaxCount = 400          # upper edge of the scanned count range

# Overlapping (start, start + windowSize) count windows:
# (0,100), (50,150), ..., (250,350).
# range() instead of the Python-2-only xrange(): identical when iterated,
# and keeps this line working under Python 3 as well.
countRangesList = [(start, start + windowSize)
                   for start in range(0, MaxCount - windowSize, countStepSize)]

# make containers: one results dict of metric-name -> list per count range
copyList = [
    'success', 'falsepos', 'truepos', 'falseneg', 'trueneg',
    'successRate', 'falseposRate', 'trueposRate', 'falsenegRate', 'truenegRate',
    'specificity', 'sensitivity',
    'success0', 'falsepos0', 'truepos0', 'falseneg0', 'trueneg0',
    'successRate0', 'falseposRate0', 'trueposRate0', 'falsenegRate0', 'truenegRate0',
    'specificity0', 'sensitivity0',
    ]
resultsAll = {}
for c in copyList:
    resultsAll[c] = []
# BUG FIX: the original comprehension repeated the SAME resultsAll dict, so
# every element of resultsAllAll aliased one shared object and mutating one
# would have mutated all. Build an independent dict of fresh lists per range.
resultsAllAll = [dict((c, []) for c in copyList) for _ in range(len(countRangesList))]

# Per-count-range accumulators for mean discriminant weights and metrics.
wMeanAllAll = []
##topStatsIndicesAllAll = []
# training-set metrics ...
successRateMeanAll, specificityMeanAll, sensitivityMeanAll = [], [], []
# ... and the same metrics evaluated on all of the data ("0" suffix)
successRate0MeanAll, specificity0MeanAll, sensitivity0MeanAll = [], [], []

# go through list of count ranges: for each count window, run LDA on every
# feature combo and accumulate the per-trial metrics
for (countRangeIndex, countRange) in enumerate(countRangesList):
    
    print('Count Range %d, %d' %(countRange[0], countRange[1]))
    # Cut in count: keep only data points whose count lies inside this window
    cutCount = (countMatrix > countRange[0]) & (countMatrix < countRange[1])
    
    # OLD stuff

    # create temporary containers, fresh for each count range
    topStatsIndicesAll = []
    wMeanAll = []
    resultsAll = {}
    for c in copyList:
        resultsAll[c] = []
    for (comboIndex, combo) in enumerate(comboList):  # go through list of combinations
        # Print status

        print('Combo', combo)
        # the features are in all the columns except for the last;
        # create one matrix that only includes data points in the count range and
        # create another matrix that includes features for points of all count
        featureMatrix = statsMatrix[cutCount,:][:,combo]
        featureMatrixAllTransmission = statsMatrix[:,combo]

        # the target matrix is the last column, has values of 0,1 for no lead, lead
        targetMatrix = statsMatrix[cutCount,:][:,-1]
        targetMatrixAllTransmission = statsMatrix[:,-1]
        
        #  APPLY LDA over numTrials trials; w holds per-trial discriminant
        #  weights. Per the prints below, plain metric keys are scored on the
        #  training split and the "0"-suffixed keys on all of the data.
        #  NOTE(review): confirm that split in PBAR_Zspec.RunMlpyLDA.
        (w, results) = PBAR_Zspec.RunMlpyLDA(\
            featureMatrix, targetMatrix, featureMatrixAllTransmission, targetMatrixAllTransmission, numTrials)
        
        # Append results to list (one entry per combo, each an array of trials)
        keysList = results.keys()
        for c in keysList:
            resultsAll[c].append(results[c])    
        # mean discriminant weights across the trials for this combo
        wMean = w.mean(axis = 0)
        wMeanAll.append(wMean)

        print("Success on training set: %3.3f" % results['successRate'].mean())
        print("Specificity on training set: %3.3f" % results['specificity'].mean())
        print("Sensitivity on training set: %3.3f" % results['sensitivity'].mean())
    
        print("Success on all data: %3.3f" % results['successRate0'].mean())
        print("Specificity on all data: %3.3f" % results['specificity0'].mean())
        print("Sensitivity on all data: %3.3f" % results['sensitivity0'].mean())      
        print 'Done.'
        
##        topStatsIndicesAll.append(topStatsIndices)
    wMeanAllAll.append(wMeanAll)
##    topStatsIndicesAllAll.append(topStatsIndicesAll)
    # Summarize the training results: mean over trials, one value per combo
    successRateMean = np.array([s.mean() for s in resultsAll['successRate']])
    specificityMean = np.array([s.mean() for s in resultsAll['specificity']])
    sensitivityMean = np.array([s.mean() for s in resultsAll['sensitivity']])
    
    # same summary for the all-data ("0") metrics
    successRate0Mean =  np.array([s.mean() for s in resultsAll['successRate0']])
    specificity0Mean = np.array([s.mean() for s in resultsAll['specificity0']])
    sensitivity0Mean = np.array([s.mean() for s in resultsAll['sensitivity0']])
    
    successRateMeanAll.append(successRateMean)
    specificityMeanAll.append(specificityMean)
    sensitivityMeanAll.append(sensitivityMean)
    successRate0MeanAll.append(successRate0Mean)
    specificity0MeanAll.append(specificity0Mean)
    sensitivity0MeanAll.append(sensitivity0Mean)

# Fit to discriminant as a function of count
# success on all of the data
numberFeaturesList = np.array([len(combo) for combo in comboList])

# Summarize the results

# show the performance indices for every 3-feature combo at each count range
print('Combo, success, specificity, sensitivity, wMean')
for countIndex in range(len(successRateMeanAll)):
    for (comboIndex, combo) in enumerate(comboList):
        if len(combo) != 3:
            continue
        print('%s, %d,  %3.3f, %3.3f, %3.3f' %(str(combo), countIndex, successRateMeanAll[countIndex][comboIndex], \
            specificityMeanAll[countIndex][comboIndex], sensitivityMeanAll[countIndex][comboIndex]))

# show the best 5 combinations at each level of transmission
print('Combo, success, specificity, sensitivity, wMean')

# NEW STUFF


# feature and target matrices over all transmission values
X_AllTransmission = statsMatrix[:, :-1]
y_AllTransmission = statsMatrix[:, -1]

# the two classifiers to compare at each configuration
classifier = {'lda': LDA(), 'lr': LogisticRegression()}
classifierKeys = classifier.keys()

# containers indexed [count range][number of features kept]
classifiersAllAll = []
scoresAllAll = []
topFeaturesIndicesAll = []
topFeatureScores = []

# go through list of count ranges: for each window, select k best features by
# chi2 for k = 1..all, then train/score both classifiers on a random split
for (countRangeIndex, countRange) in enumerate(countRangesList):
    classifiersAll = []
    scoresAll = []
    
    print('Count Range %d, %d' %(countRange[0], countRange[1]))
    # Cut in count
    cutCount = (countMatrix > countRange[0]) & (countMatrix < countRange[1])
    
    # define feature and target matrices for this count window
    X = statsMatrix[cutCount,:][:,:-1]
    y = statsMatrix[cutCount,:][:,-1]
    
    # cycle through number of features from 1 to maximum number of features
    trainSize = 0.75
    for numFeaturesKeep in xrange(1, len(statsCollapseListALL)+1):
        print("Number of features %d" %numFeaturesKeep)
        # univariate chi2 feature selection
        # NOTE(review): chi2 requires non-negative feature values -- confirm
        # the collapsed statistics are all non-negative.
        ch2 = SelectKBest(chi2, k=numFeaturesKeep)
        ch2.fit(X, y)
        # topFeaturesIndices are ORIGINAL column indices of the kept features;
        # topFeatureScores are their chi2 scores, in the same order
        topFeaturesIndices = ch2.get_support(indices = True)
        topFeatureScores = np.array(ch2.scores_)[topFeaturesIndices]
        # order the kept features by decreasing chi2 score
        orderorder = np.argsort(topFeatureScores)[-1::-1]
        
        stringstring = ''
        for ooIndex, oo in enumerate(orderorder):
            # BUG FIX: oo indexes within the kept subset (0..k-1), so the
            # feature name must be looked up via topFeaturesIndices[oo];
            # the original statsCollapseListALL[oo] printed the wrong names
            # whenever the selected columns were not the first k.
            stringstring = stringstring + '%d) %s: %3.3f; ' %(ooIndex, statsCollapseListALL[topFeaturesIndices[oo]], topFeatureScores[oo]) 
        print(stringstring)

        # random train/test split restricted to the selected columns
        X_train, X_test, y_train, y_test = cross_validation.train_test_split(X[:,topFeaturesIndices], y, train_size = trainSize)
        
        classifiers = {}
        scores = {}
        for key in classifierKeys:
            classifier[key].fit(X_train, y_train)
            pred = classifier[key].predict(X_test)            
            p, r, fbeta_score, support = precision_recall_fscore_support(y_test, pred, average = 'micro')
            # snapshot the fitted classifier for this configuration
            classifiers[key] = copy.deepcopy(classifier[key])
            print('%s, Precision %3.3f, Recall %3.3f, f %3.3f' %(key, p, r, fbeta_score))
            # save precision, recall, f1, and support, success
            scores[key] = [p, r, fbeta_score, support,  sum(y_test == pred) / float(len(pred))]

        scoresAll.append(scores)
        classifiersAll.append(classifiers)
    scoresAllAll.append(scoresAll)
    classifiersAllAll.append(classifiersAll)


# choose metric: rank combos by mean sensitivity, show the top 5 per count range
datOut = {}
for countIndex in range(len(successRateMeanAll)):
    rankedCombos = np.argsort(sensitivityMeanAll[countIndex])[::-1]
    for comboIndex in rankedCombos[0:5]:
        combo = comboList[comboIndex]
        print('%s, %d,  %3.3f, %3.3f, %3.3f' %(str(combo), countIndex, successRateMeanAll[countIndex][comboIndex], \
            specificityMeanAll[countIndex][comboIndex], sensitivityMeanAll[countIndex][comboIndex]))

# Names of every variable to snapshot into the output dict.
datOutCopyList = \
    ['calTimeNum', 'badDetectorsMask', 'goodZspecMask','datasetAcquisitionTime','binThreshold', \
    'stats', 'datasetGroups', 'filenameList', 'calGainShift', 'goodDetectorsMask',
    'datasetMaterial', 'datasetDescription', 'goodZspecNameList', 'datasetTimeNum',
    'datasetTimeStr', 'datasetGroupsWidth', 'datasetRawWidth', 'goodDetectorsList',
    'badZspecMask', 'goodZspecIndices', 'gainExtrapolated', 'dat', 'badZspecNameList',
    'gainCorrection', 'badZspecIndices', 'badDetectorsList', 'datasetTime', 'datasetGroupsIndices', 'pulseRate', \
    'detectorList','groupNamesList','statsCollapsed','statsCollapseListALL','groupNamesExportList', \
    'comboListTemp','statsCollapsedMatrix','statsMatrix','countCollapsedMatrix','countMatrix', \
    'statsCollapsedMatrixIncludingLowStats', 'statsMatrixIncludingLowStats','countCollapsedMatrixIncludingLowStats', \
    'countMatrixIncludingLowStats','numTrials','countStepSize','windowSize','MaxCount',\
    'wMeanAllAll','successRateMeanAll','specificityMeanAll','sensitivityMeanAll', \
    'successRate0MeanAll','specificity0MeanAll','sensitivity0MeanAll'
    ]

# Copy each variable into datOut by name. Look the name up in the module
# globals instead of exec'ing generated code, and catch only the
# "name missing" case instead of a bare except that hid every other error.
for d in datOutCopyList:
    try:
        datOut[d] = globals()[d]
    except KeyError:
        print('%s does not exist. Skipping.' %d)

# Pickle everything to one file (protocol 2 for Python 2 compatibility)
fullFilename = os.path.join(basepath, 'zspec2ndSet.dat')
with open(fullFilename ,'wb') as fid:
    print('Writing %s' %fullFilename)
    cPickle.dump(datOut, fid, protocol = 2)

#del datOut
