##PBAR_Zspec_LoadAndProcessData.py
#
#  Loads data and calculates the stats
#
# Takes the place of PBAR_Zspec_load_Ver2.py
#
#   4/25/2013, John Kwong

#import csv
import numpy as np
import numpy.matlib  # required: np.matlib.repmat is used below and is NOT auto-imported by 'import numpy'
#import datetime
#import time
import PBAR_Zspec
#from pandas import Series, DataFrame
#from mpl_toolkits.mplot3d import Axes3D
#import pandas as pd
#from sklearn.lda import LDA
#from sklearn import svm
import mlpy

# Plot style cycles, repeated so long series of curves never run out of
# distinct colors / line styles / markers.
plotColors = 10 * ['r', 'b', 'g', 'm', 'c', 'y', 'k']
lineStyles = 10 * ['-', '-.', ':', '_', '|']
markerTypes = 2 * ['.', 'o', 'v', '^', '<', '>', '1', '2', '3', '4',
                   's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd']

######################################
##  LOAD DATA AND CALCULATE STATS   ##
######################################

# Root directory holding the raw Zspec / radiography data files.
# NOTE(review): hard-coded Windows user path -- consider a config file or CLI arg.
basepath = r'C:\Users\jkwong\Documents\Work\PBAR\data'

# Create list of datasets: short names and full paths, as produced by the
# project helper (exact contents defined in PBAR_Zspec).
(filenameList, fullfilenameList) = \
               PBAR_Zspec.GenerateDefaultDatasetFilenameList(basepath)

# Create list of dataset groups (group labels and, per group, the indices
# into filenameList belonging to it).
(datasetGroups, datasetGroupsIndices) = \
                PBAR_Zspec.GenerateDefaultDatasetGroupList(filenameList)

# Create list of good/bad detectors (detector id arrays; used below to
# restrict analysis to working channels).
(goodDetectorsList, badDetectorsList) = \
                    PBAR_Zspec.GenerateDefaultDetectorList()

# Load summary data.
# NOTE(review): filename says "OLD" -- confirm this is the intended summary file.
infoFilename = basepath + '\\' + 'datasetSummaryOLD.txt'

# Per-dataset metadata: description, acquisition time, and timestamps in
# three representations (datetime object, numeric, string) -- presumably;
# verify against PBAR_Zspec.GetDatasetInformation.
(datasetDescription, datasetAcquisitionTime, \
 datasetTime, datasetTimeNum, datasetTimeStr) = \
 PBAR_Zspec.GetDatasetInformation(infoFilename, filenameList)

# Load Zspec data (the main spectral dataset).
print "Loading Data"
dat = PBAR_Zspec.ReadZspec(fullfilenameList)

# Load Radiography data and the mapping between radiography and Zspec.
(datRad, datRadZspec, radMap) = PBAR_Zspec.ReadRad(basepath)

## Make gain correction data (one-time preprocessing; normally left disabled)
#countRange = np.array([0.7e3, 1e3])
#binBounds = [20, 120]
#PBAR_Zspec.CreateGainCorrectionFile(basepath, 'GainCorrection.csv', countRange, binBounds)

# Read in the Calibration file (per-calibration-time gain shifts).
(calTimeNum, calGainShift) = PBAR_Zspec.LoadGainCalibration(basepath + '\\' + 'GainCorrection.csv')

# Generate extrapolated gain matrix: gain shift interpolated/extrapolated to
# each dataset's acquisition time.
gainExtrapolated = PBAR_Zspec.ExtrapolateGain(calTimeNum, calGainShift, datasetTimeNum)

# Calculate the spectra stats.
binThreshold = 7           # minimum bin used in the stats calculation
baselineDatasetIndex = 68  # dataset used as the no-target baseline
stats = PBAR_Zspec.CalculateStats(\
    dat, datasetAcquisitionTime, np.arange(256), gainExtrapolated, basepath, binThreshold, baselineDatasetIndex)

## Save values to file (disabled; outputDir would need to be defined)
#PBAR_Zspec.WriteStats(stats, filenameList, outputDir, 'd')


#########################################################
##   MAKE NICE ARRAYS TO PUT INTO LEARNING FUNCTIONS   ##
#########################################################

# Make a dictionary of dictionaries containing a values from a subset of the detectors.
#
# statsCollapsed[<dataset group name>][<statistics name>] = 
#        values for all detectors specified for all datasets in the group
#

# Detectors worth including: good detectors restricted to ids in (40, 120),
# exclusive on both ends.
detectorList = goodDetectorsList[(goodDetectorsList > 40) & (goodDetectorsList < 120)]

# The dict containing the collapsed stuff will contain only these groups of data sets
groupNamesList = np.array(('PbALL', 'Pb', 'Fe', 'Al', 'PbNOT'))

# Make the dict (keyed by group name, then statistic name -- see comment above).
statsCollapsed = PBAR_Zspec.CreateStatsCollapsed(stats, groupNamesList, datasetGroupsIndices, detectorList)

# Collect only the stats we need into a dictionary separated by material class.
# (Earlier candidate feature lists kept below for reference; the active lists
# are defined further down.)
#statsCollapseList = np.array((
            #'binMean_g', 'binSTD_g',\
            #'q_range_g', 'binMean_q50_g', \
            #'multibin_0_20_g', 'multibin_20_256_g', \
            #'multibin_20_ratio_g', 'transmission'))
#statsCollapseList = np.array((
            #'binMean_g', 'binMean_q50_g', \
            #'multibin_0_20_g', 'multibin_20_256_g', \
            #'multibin_20_ratio_g', 'transmission'))
#statsCollapseList = np.array((
            #'binMean_g1', 'binMean_q50_g1', \
            #'multibin_0_20_g1', 'multibin_20_256_g1', \
            #'multibin_20_ratio_g1', 'transmission'))

def FindCombinations(arrayIn):
    """Return every non-empty combination (subset) of the elements of arrayIn.

    Recursive construction: combos of arrayIn[1:], then the singleton of the
    first element, then every rest-combo with the first element appended.
    For n elements this yields 2**n - 1 combinations (as lists).

    Fixes over the original version:
      * handles length-0 and length-1 inputs (previously infinite recursion /
        IndexError via the len == 2 base case);
      * the full-set combo is returned as a fresh list instead of aliasing
        the caller's input;
      * debug print statements removed.
    """
    if len(arrayIn) == 0:
        return []
    if len(arrayIn) == 1:
        return [[arrayIn[0]]]
    if len(arrayIn) == 2:
        # Base case: both singletons plus the pair (copied, not aliased).
        return [[arrayIn[0]], [arrayIn[1]], list(arrayIn)]
    # Get list of combos not including the first element, then weave the
    # first element back in.
    combos = FindCombinations(arrayIn[1:])
    combosAll = [[arrayIn[0]]]
    for c in combos:
        combosAll.append(c)
        combosAll.append(c + [arrayIn[0]])
    return combosAll


# Full menu of statistics available to the classifier; the feature-index
# combinations generated below index into this array.
statsCollapseListALL = np.array(( \
    'binSTD_binMean', \
    'binMean_g1', 'binSTD_g1', \
    'binMean', 'binSTD', \
    'binMean_q50_g1', \
    'multibin_20_ratio_g1', 'transmission'))

# NOTE: statsCollapseList is assigned inside the combo loop below as
# statsCollapseListALL[combo]; the duplicate literal assignment that used to
# sit here was dead code (overwritten before first use) and has been removed.

# Only include data from these groups for the test/training matrix
#groupNames = np.array(('Pb', 'Fe', 'Al'))
#groupNames = np.array(('PbALL', 'Fe', 'Al'))
groupNamesExportList = np.array(('PbALL', 'PbNOT'))

# Every non-empty subset of the 8 feature indices: 2**8 - 1 = 255 combos.
comboList = FindCombinations([0, 1, 2, 3, 4, 5, 6, 7])

# make containers for per-combo results
successAll = []          # per-trial success on the training subset
success0All = []         # per-trial success on all of the data
topStatsIndicesAll = []  # feature-importance ordering per combo
wMeanAll = []            # mean LDA weight vector per combo

numTrials = 100          # LDA train/test repetitions per combo
trainFraction = 0.20     # fraction of datasets used for training
includeTransmission = True

for combo in comboList:
    
    statsCollapseList = statsCollapseListALL[combo]
    # Make arrays with subset of stats and single 
    (statsCollapsedMatrix, statsMatrix) = PBAR_Zspec.CreateStatsCollapsedAll(statsCollapsed, statsCollapseList, groupNamesExportList)
    
    ###################
    ##   APPLY LDA   ##
    ###################
    # Set range of transmission to train on.
    
    transmissionRange = np.array((1000, 13000))
    transmissionRange = np.array((2200, 13000))
    ##transmissionRange = np.array((8500, 13000))
    ##transmissionRange = np.array((2200, 5200))
    transmissionRange = np.array((0, 2000))
    
    #(w, success, success0) = PBAR_Zspec.RunLDA(\
    #    statsMatrix, trainFraction, numTrials, transmissionRange, includeTransmission)
    (w, success, success0) = PBAR_Zspec.RunMlpyLDA(\
        statsMatrix, trainFraction, numTrials, transmissionRange, includeTransmission)
    
    wMean = w.mean(axis = 0)
    
    print("Success on whole training set: %3.3f" % success.mean())
    print("Success on all data: %3.3f" % success0.mean())
    
    # Define cut in transmission
    transmission = statsMatrix[:,-2]
    cutt = (transmission > transmissionRange[0]) & (transmission < transmissionRange[1])
    
    # To include or not include transmission matrix
    if includeTransmission:
        featuresAllTransmission = statsMatrix[:,0:-1] 
    else:
        featuresAllTransmission = statsMatrix[:,0:-2]
    targetsAllTransmission = statsMatrix[:,-1]
    
    # Cut in transmission
    features = statsMatrix[cutt,0:-1] # values, INCLUDE transmission
    targets = statsMatrix[cutt,-1] # classification
    
    # Display contributions in order of significance
    temp = np.argsort(abs(wMean) * features.mean(axis = 0))
    topStatsIndices = temp[::-1]
    for ii in range(len(topStatsIndices)):
        jj = topStatsIndices[ii]
        print statsCollapseList[jj], ": ",  (abs(wMean) * features.mean(axis = 0))[jj]
    
        # MAKE HISTROGRAMS OF THE DISTANCE
        # use mean of the w's
        dist0 = (np.matlib.repmat(wMean, featuresAllTransmission.shape[0], 1) * featuresAllTransmission).sum(axis = 1)
        
        distBinEdges = np.arange(0,700, 10)
        distBinCenters = (distBinEdges[1:] + distBinEdges[:-1])/2
        distCounts = dict()
        for ii in range(len(groupNamesExportList)):
            distCounts[groupNamesExportList[ii]], temp = np.histogram(dist0[statsMatrix[:,-1] == ii], bins = distBinEdges)
    print 'Done.'
    
    successAll.append(success)
    success0All.append(success0)
    topStatsIndicesAll.append(topStatsIndices)
    wMeanAll.append(wMean)


# summarize the training results
meanSuccess =  np.array([s.mean() for s in successAll])
# success on all of the data
meanSuccess0 =  np.array([s.mean() for s in success0All])
numberFeatures = np.array([len(s) for s in comboList])

bestOfParticularNumberFeatures = []
for num in xrange(1, 1+len(statsCollapseList)):
    print("number of features %d" % num)
    # create short list of all sets that have this number of features
    cut = numberFeatures == num
    indices = where(cut)[0]  # references the super set
    successes = meanSuccess[cut]
    ranking = argsort(successes)[-1::-1]  # in order from highest to lowest; in subset space
    #show the top 5 sets
    for i in xrange(5):
        listlist = comboList[indices[ranking[i]]]
        print statsCollapseList[listlist], meanSuccess[indices[ranking[i]]], wMeanAll[indices[ranking[i]]]

# examine the results
# NOTE(review): 'plt' is never imported anywhere in this file; these calls
# require 'import matplotlib.pyplot as plt' at the top (and plt.show() when
# run outside an interactive/pylab session) -- confirm and add.

# Success rates per feature combination, in combo order.
plt.figure()
plt.grid()
plt.plot(meanSuccess, label = 'Success on training set')
plt.plot(meanSuccess0, label = 'Success on all of data')
plt.xlabel('Training Set #')
plt.ylabel('Success')
plt.legend()


# Training success vs. how many features the combination used.
plt.figure()
plt.grid()
plt.plot(numberFeatures, meanSuccess, '.k', label = 'Success on training set', markersize = 15, alpha = 0.25)
plt.xlabel('# Features')
plt.ylabel('Success')
plt.legend()
