##PBAR_Zspec_SimulateSpectraFirstTest.py
# Generates simulated (Monte Carlo) spectra with reduced counting statistics.
# First run PBAR_Zspec_Load_Ver2.py and PBAR_Zspec_ProcessData_Ver2.py
#
#  4/17/2013 

import numpy as np
##parameters
detectorNo = 68          # column index of the single detector whose spectrum is simulated

# simulation parameters
acquisitionTimeMC = 1    # simulated acquisition time (same units as datasetAcquisitionTime)
numberTrials = 100       # number of Monte Carlo spectra generated per dataset
lowerBinThreshold = 7    # bins below this index are zeroed before computing statistics

# NOTE(review): later code calls np.matlib.repmat, which requires an explicit
# "import numpy.matlib" (and numpy.matlib is deprecated) — confirm the run
# environment actually provides it.

statsMC = dict()

# Names of every per-trial statistic computed in the main loop below.
statsList = (
    # raw-spectrum statistics
    'binSum',
    'binMean', 'binSTD', 'binSTD_binMean',
    'q_10', 'q_25', 'q_50', 'q_75', 'q_90',
    'q_range', 'q_range_ratio', 'binMeanSq', 'binMean_q50',
    # gain-corrected versions
    'binMean_g', 'binSTD_g',
    'q_10_g', 'q_25_g', 'q_50_g', 'q_75_g', 'q_90_g',
    'q_range_g', 'binMeanSq_g', 'binMean_q50_g',
    # multibin splits (counts below/above a threshold bin)
    'multibin_0_10', 'multibin_10_256', 'multibin_0_20', 'multibin_20_256',
    'multibin_10_ratio', 'multibin_20_ratio',
    # gain-corrected multibin splits
    'multibin_0_10_g', 'multibin_10_256_g', 'multibin_0_20_g', 'multibin_20_256_g',
    'multibin_10_ratio_g', 'multibin_20_ratio_g',
    'transmission')

# Allocate one (dataset x trial) array per statistic.
# Iterate the names directly instead of indexing with range(len(...)).
for statName in statsList:
    statsMC[statName] = np.zeros((len(dat), numberTrials))

binArray = np.matlib.repmat(np.arange(0,256),numberTrials,1).T

# cycle through the dataset groups
for ii in range(len(dat)):

    print(ii)  # progress indicator (original used MATLAB-style disp(), a NameError in Python)
    # make temporary array
    datTemp = dat[ii].astype(float)

    # container for the MC spectra: 256 energy bins x numberTrials trials
    datMC = np.zeros((256, numberTrials))

    acquisitionTime = datasetAcquisitionTime[ii]
    totalCounts = datTemp[:,detectorNo].sum().astype(float)
    # expected total counts for the simulated acquisition (the /2.0 further halves statistics)
    numberCountsMC = totalCounts * acquisitionTimeMC / acquisitionTime / 2.0

    # select spectrum from one particular detector and turn it into pdf by normalizing
    probdist = datTemp[:,detectorNo].astype(float) / datTemp[:,detectorNo].sum()

    # Run Trials
    for trialNo in range(numberTrials):
        # Gaussian approximation to the Poisson fluctuation of the total counts;
        # int(...) because np.random.choice requires an integer size, and
        # np.sqrt replaces the undefined bare sqrt of the original.
        countsTemp = int(round(numberCountsMC + np.random.randn() * np.sqrt(numberCountsMC)))
        bins = np.random.choice(np.arange(256), size=countsTemp, replace=True, p=probdist)
        # 256 unit-width bins with edges 0..256; the original passed binEdges
        # before it was ever assigned (NameError on the first trial).
        counts, binEdges = np.histogram(bins, bins=np.arange(257))
        datMC[:,trialNo] = counts

    # CALCULATE THE STATISTICS

    # remove the lower bins by setting it to zero
    datMC[0:lowerBinThreshold,:] = 0

    # Find the quantile bins
    datTempCummulativeSum = datMC.cumsum(axis = 0) # cumulative sum
    # normalize the cumulative sum (np.tile replaces the unimported np.matlib.repmat)
    datTempCummulativeSum = datTempCummulativeSum / np.tile(datTempCummulativeSum[-1,:], (256, 1))

    # quantile bin = bin whose normalized cumulative sum is closest to the quantile
    statsMC['q_10'][ii,:] = np.argmin(abs(datTempCummulativeSum - 0.10), axis = 0)
    statsMC['q_25'][ii,:] = np.argmin(abs(datTempCummulativeSum - 0.25), axis = 0)
    statsMC['q_50'][ii,:] = np.argmin(abs(datTempCummulativeSum - 0.50), axis = 0)
    statsMC['q_75'][ii,:] = np.argmin(abs(datTempCummulativeSum - 0.75), axis = 0)
    statsMC['q_90'][ii,:] = np.argmin(abs(datTempCummulativeSum - 0.90), axis = 0)
    # quantile range
    statsMC['q_range'][ii,:] = statsMC['q_75'][ii,:] - statsMC['q_25'][ii,:]
    statsMC['q_range_ratio'][ii,:] = statsMC['q_range'][ii,:] / statsMC['q_50'][ii,:]

    # calculate the mean and spread (count-weighted mean bin per trial)
    statsMC['binMean'][ii,:] = (binArray * datMC).sum(axis = 0).astype(float) / datMC.sum(axis = 0).astype(float)
    statsMC['binMeanSq'][ii,:] = ((binArray**2) * datMC).sum(axis = 0).astype(float) / datMC.sum(axis = 0).astype(float)

    statsMC['binSum'][ii,:] = datMC.sum(axis = 0).astype(float)

    statsMC['binMean_q50'][ii,:] = statsMC['binMean'][ii,:] - statsMC['q_50'][ii,:]

    # array of per-trial means broadcast down the bin axis, 256 x numberTrials
    temp = np.tile(statsMC['binMean'][ii,:], (datMC.shape[0], 1))

    # count-weighted standard deviation of the bin number
    statsMC['binSTD'][ii,:] = np.sqrt(  ( ((binArray - temp)**2) * datMC).sum(axis = 0).astype(float) / datMC.sum(axis = 0).astype(float)  )

    statsMC['binSTD_binMean'][ii,:] = statsMC['binSTD'][ii,:] / statsMC['binMean'][ii,:]

    # CALCULATE THE GAIN CORRECTED parameters
    baselineIndex = datasetGroupsIndices['CC'][0]

    # Unlike in the real script, these are single value as we are focused on one detector
    g = binMeanCCExtrapolated[baselineIndex,detectorNo] / binMeanCCExtrapolated[ii,detectorNo]

    statsMC['q_10_g'][ii,:] = statsMC['q_10'][ii,:] * g
    statsMC['q_25_g'][ii,:] = statsMC['q_25'][ii,:] * g
    statsMC['q_50_g'][ii,:] = statsMC['q_50'][ii,:] * g
    statsMC['q_75_g'][ii,:] = statsMC['q_75'][ii,:] * g
    statsMC['q_90_g'][ii,:] = statsMC['q_90'][ii,:] * g
    statsMC['q_range_g'][ii,:] = statsMC['q_range'][ii,:] * g
    statsMC['binMean_g'][ii,:] = statsMC['binMean'][ii,:] * g
    statsMC['binMeanSq_g'][ii,:] = statsMC['binMeanSq'][ii,:] * g
    statsMC['binMean_q50_g'][ii,:] = statsMC['binMean_q50'][ii,:] * g
    statsMC['binSTD_g'][ii,:] = statsMC['binSTD'][ii,:] * g

    # MULTIBIN PARAMETERS
    # Calculate histogram splits - no rebinning so no correction
    statsMC['multibin_0_10'][ii,:] = datMC[0:10,:].sum(axis=0)  # bins 0 to 9, so first ten bins
    statsMC['multibin_10_256'][ii,:] = datMC[10:,:].sum(axis=0)
    statsMC['multibin_0_20'][ii,:] = datMC[0:20,:].sum(axis=0)
    statsMC['multibin_20_256'][ii,:] = datMC[20:,:].sum(axis=0)

    statsMC['multibin_10_ratio'][ii,:]  = statsMC['multibin_0_10'][ii,:]  /statsMC['multibin_10_256'][ii,:]
    statsMC['multibin_20_ratio'][ii,:]  = statsMC['multibin_0_20'][ii,:]  /statsMC['multibin_20_256'][ii,:]

    # Calculate the histogram splits with correction for temporal gain shift
    baselineIndex = datasetGroupsIndices['CC'][0]
    binCorrection = binMeanCCExtrapolated[ii,detectorNo]/binMeanCC[baselineIndex,detectorNo] # current/baseline

    # bin 10: shift the split threshold by the gain correction
    thresholdMat = round(binCorrection*9.0)  # threshold
    cutt1 = binArray <= thresholdMat
    cutt1 = cutt1.astype(float)
    cutt2 = binArray > thresholdMat
    cutt2 = cutt2.astype(float)

    statsMC['multibin_0_10_g'][ii,:] = (datMC * cutt1).sum(axis=0)
    statsMC['multibin_10_256_g'][ii,:] = (datMC * cutt2).sum(axis=0)

    # bin 20
    thresholdMat = round(binCorrection*19.0)   # threshold
    cutt1 = binArray <= thresholdMat
    cutt1 = cutt1.astype(float)
    cutt2 = binArray > thresholdMat
    cutt2 = cutt2.astype(float)

    statsMC['multibin_0_20_g'][ii,:] = (datMC * cutt1).sum(axis=0)
    statsMC['multibin_20_256_g'][ii,:] = (datMC * cutt2).sum(axis=0)

    statsMC['multibin_10_ratio_g'][ii,:]  = statsMC['multibin_0_10_g'][ii,:]  /statsMC['multibin_10_256_g'][ii,:]
    statsMC['multibin_20_ratio_g'][ii,:]  = statsMC['multibin_0_20_g'][ii,:]  /statsMC['multibin_20_256_g'][ii,:]

    # transmission value - sum spectra / acquisition time
    statsMC['transmission'][ii,:] = datMC.sum(axis = 0) / acquisitionTimeMC



##
# MAKE NICE ARRAYS TO PUT INTO LEARNING FUNCTIONS
##


#  MAKE A DICTIONARY CONTAINING ALL THE DISCRIMINANTS FOR A SUBSET OF DETECTORS
#
# it is a dictionary of dictionaries
#
# statsCollapsed[<dataset group name>][<statistics name>] = values for all detectors specified for all datasets in the group
#
#   each stats entry is a 1-D array with one value per (dataset, detector) pair
detectorList = goodDetectorsList.copy()
# keep only the central detectors (indices 41..119)
detectorList = detectorList[(detectorList>40) & (detectorList < 120)]

materialType = ()          # placeholder, kept for downstream compatibility
statsCollapsed = dict()

# detector index (0..136) for every dataset row; np.tile replaces the
# unimported/deprecated np.matlib.repmat
detectorNumberArray = np.tile(np.arange(137), (len(dat), 1))

# number of detectors used in the collapsed arrays (the original first assigned
# dat[0].shape[1] and immediately overwrote it — dead code, removed)
numberDetectors = len(detectorList)

groupNames = np.array(('PbALL', 'Pb', 'Fe', 'Al', 'PbNOT'))
for group in groupNames: # cycle through groups of datasets
    statsCollapsed[group] = dict()
    nValues = len(datasetGroups[group]) * numberDetectors

    # The detector-number column is identical for every statistic, so build it
    # once per group (the original re-zeroed and re-filled it inside the stat
    # loop — redundant work with the same final result).
    statsCollapsed[group]['detectorNumber'] = np.zeros(nValues)
    for jj in range(len(datasetGroups[group])): # cycle through datasets in group
        startIndex = jj * numberDetectors
        stopIndex = (jj+1) * numberDetectors
        index = datasetGroupsIndices[group][jj]
        temp = detectorNumberArray[index,detectorList]
        statsCollapsed[group]['detectorNumber'][startIndex:stopIndex] = temp.reshape(np.size(temp))

    for stat in statsList:  # cycle through stats in a dataset
        statsCollapsed[group][stat] = np.zeros(nValues)
        for jj in range(len(datasetGroups[group])): # cycle through datasets in group
            startIndex = jj * numberDetectors
            stopIndex = (jj+1) * numberDetectors
            index = datasetGroupsIndices[group][jj]
            # reshape the values into a 1-d array
            temp = stats[stat][index,detectorList]
            statsCollapsed[group][stat][startIndex:stopIndex] = temp.reshape(np.size(temp))

# collect only the stats we need into a dictionary separated by material class.
# NOTE: the original reassigned this list three times; the final ('_g1')
# variant named keys that do not exist in statsCollapsed (statsList defines
# 'binMean_g', not 'binMean_g1'), which would raise KeyError when indexing
# statsCollapsed[group][stat] below. The working '_g' names are kept instead.
statsCollapseList = np.array((
            'binMean_g', 'binMean_q50_g',
            'multibin_0_20_g', 'multibin_20_256_g',
            'multibin_20_ratio_g', 'transmission'))

# One wide 2-D array per dataset group: rows are (dataset, detector) pairs,
# columns are the statistics named in statsCollapseList.
statsCollapsedAll = dict()
for group in groupNames: # cycle through groups of datasets
    nRows = len(datasetGroups[group]) * numberDetectors
    collected = np.zeros((nRows, len(statsCollapseList)))
    for col, stat in enumerate(statsCollapseList):  # cycle through stats
        collected[:, col] = statsCollapsed[group][stat]  # big array with all the stuff
    statsCollapsedAll[group] = collected


# make nice array for putting in LDA function
# (alternate groupings tried previously: ('Pb','Fe','Al') and ('PbALL','Fe','Al');
# the dead intermediate reassignments were removed)
groupNames = np.array(('PbALL', 'PbNOT'))

# right most column is the classification label (the group index ii).
# BUGFIX: the original ii>0 branch computed ii + hstack(...), which added the
# label value to EVERY feature column, not just the appended label column.
for ii in range(0, len(groupNames)):
    group = groupNames[ii]
    labelColumn = ii + np.zeros((statsCollapsedAll[group].shape[0], 1))
    labeled = np.hstack((statsCollapsedAll[group], labelColumn))
    if ii == 0:
        statsCollapsedAllAll = labeled
    else:
        statsCollapsedAllAll = np.vstack((statsCollapsedAllAll, labeled))

