
# coding: utf-8

# In[ ]:

"""Part 1: Featurize categorical data using one-hot-encoding"""

from test_helper import Test

"""(1a) One-hot-encoding"""

# Sample data for manual OHE
sampleOne = [(0, 'mouse'), (1, 'black')]
sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
sampleDataRDD = sc.parallelize([sampleOne, sampleTwo, sampleThree])

"""The first feature indicates the type of animal (bear, cat, mouse); the second feature describes the animal's color (black, tabby); 
   and the third (optional) feature describes what the animal eats (mouse, salmon).
"""
# Manual OHE dictionary: each distinct (featureID, value) pair gets a unique index 0-6.
# BUG FIX: the first entry was assigned into a misspelled name ('smapleOHEDictManual'),
# which would raise a NameError; every entry now targets sampleOHEDictManual.
sampleOHEDictManual = {}
sampleOHEDictManual[(0, 'bear')] = 0
sampleOHEDictManual[(0, 'cat')] = 1
sampleOHEDictManual[(0, 'mouse')] = 2
sampleOHEDictManual[(1, 'black')] = 3
sampleOHEDictManual[(1, 'tabby')] = 4
sampleOHEDictManual[(2, 'mouse')] = 5
sampleOHEDictManual[(2, 'salmon')] = 6


# In[ ]:

"""(1b) Sparse vectors"""
import numpy as np
from pyspark.mllib.linalg import SparseVector
"""
   Use SparseVector to verify that both the sparse and dense representations yield the same results when computing dot products
"""
aDense = np.array([0., 3., 0., 4.])
aSparse = SparseVector(len(aDense),[(1,3.),(3,4.)])

bDense = np.array([0., 0., 0., 1.])
bSparse = SparseVector(len(bDense),[(3,1.)])

w = np.array([0.4, 3.1, -1.4, -.5])
print aDense.dot(w)
print aSparse.dot(w)
print bDense.dot(w)
print bSparse.dot(w)

#Test Sparse Vectors
Test.assertTrue(isinstance(sSparse, SparseVector), 'aSparse needs to be an instance of SparseVector')
Test.assertTrue(isinstance(bSparse, SparseVector), 'aSparse needs to be an instance of SparseVector')
Test.assertTrue(aDense.dot(w) == aSparse.dot(w),
                'dot product of aDense and w should equal dot product of aSparse and w')
Test.assertTrue(bDense.dot(w) == bSparse.dot(w),
                'dot product of bDense and w should equal dot product of bSparse and w')


# In[ ]:

"""(1c) OHE features as sparse vectors"""

#   Using the mapping defined by the OHE dictionary from Part (1a), manually define OHE features for the three sample data points using 
#   SparseVector format
sampleOneOHEFeatManual = SparseVector(7, [(2,1.), (3,1.)])
sampleTwoOHEFeatManual = SparseVector(7, [(1,1.), (4,1.), (5,1.)])
sampleThreeOHEFeatManual = SparseVector(7, [(0,1.), (3,1.), (6,1.)])

#Test OHE Features as sparse vectors
Test.assertTrue(isinstance(sampleOneOHEFeatManual, SparseVector),
               'sampleOneOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleTwoOHEFeatManual, SparseVector),
                'sampleTwoOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleThreeOHEFeatManual, SparseVector),
                'sampleThreeOHEFeatManual needs to be a SparseVector')


# In[ ]:

"""(1d) Define a OHE function"""
#Using the OHE dictionary to programatically generate OHE features from the original categorical data

def oneHotEncoding(rawFeats, OHEDict, numOHEFeats):
    """Produce a one-hot-encoding from a list of features and an OHE dictionary.

    Note:
        You should ensure that the indices used to create a SparseVector are sorted.

    Args:
        rawFeats (list of (int, str)): The features corresponding to a single observation.  Each
            feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne)
        OHEDict (dict): A mapping of (featureID, value) to unique integer.
        numOHEFeats (int): The total number of unique OHE features (combinations of featureID and
            value).

    Returns:
        SparseVector: A SparseVector of length numOHEFeats with indices equal to the unique
            identifiers for the (featureID, value) combinations that occur in the observation and
            with values equal to 1.0.
    """
    # BUG FIX: the docstring was indented one extra space relative to the body,
    # which made the function an IndentationError.
    # BUG FIX: indices are now sorted, as the Note above (and SparseVector) requires.
    indices = sorted(OHEDict[feat] for feat in rawFeats)
    return SparseVector(numOHEFeats, indices, [1.0] * len(indices))

#Calcualte the number of features in sampleOHEDictManua
numSampleOHEFeats = len(sampleOHEDictManual.keys())

#Run oneHotEncoding on sampleOne
sampleOneOHEFeat = oneHotEncoding(sampleOne, smapleOHEDictManual, numsampleOHEFeats)

print sampleOneOHEFeat

#Test Define an OHE Function
Test.assertTrue(sampleOneOHEFeat == smapleOneOHEFeatManual,
               'sampleOneOHEFeat should equal sampleOneOHEFeatManual')
Test.assertEquals(sampleOneOHEFeat, SparseVector(7, [2,3], [1.0,1.0]),
                  'incorrect value for sampleOneOHEFeat')
Test.assertEquals(oneHotEncoding([(1, 'black'), (0, 'mouse')], sampleOHEDictManual,
                                 numSampleOHEFeats), SparseVector(7, [2,3], [1.0,1.0]),
                  'incorrect definition for oneHotEncoding')


# In[ ]:

"""(1e) Apply OHE to a dataset"""

#using the function from Part (1d) to create OHE features for all 3 data points in the sample dataset
sampleOHEData = sampleDataRDD.map(lambda rawData:oneHotEncoding(rawData, sampleOHEDictManual, numSampleOHEFeats))
print sampleOHEData.collect()

#Test Apply OHE to a dataset
sampleOHEDataValues = sampleOHEData.collect()
Test.assertTrue(len(sampleOHEDataValues) == 3, 'sampleOHEData should have three elements')
Test.assertEquals(sampleOHEDataValues[0], SparseVector(7, {2: 1.0, 3: 1.0}),
                  'incorrect OHE for first sample')
Test.assertEquals(sampleOHEDataValues[1], SparseVector(7, {1: 1.0, 4: 1.0, 5: 1.0}),
                  'incorrect OHE for second sample')
Test.assertEquals(sampleOHEDataValues[2], SparseVector(7, {0: 1.0, 3: 1.0, 6: 1.0}),
                  'incorrect OHE for third sample')


# In[ ]:

"""Part 2: Construct an OHE dictionary"""
"""
   (2a) Pair RDD of (features, category)
   To start, create an RDD of distinct (featureID, category) tuples
"""
sampleDistinctFeats = (sampleDataRDD
                      .flatMap(lambda x:x)).distinct()
print sampleDistinctFeats


# In[ ]:

#Test Pair RDD of (featureID, category)
Test.assertEquals(sorted(sampleDistinctFeats.collect()),
                 [(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
                   (1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
                  'incorrect value for sampleDistinctFeats')


# In[ ]:

"""(2b) OHE Dictionary from distinct features"""
"""
   Creating an RDD of key-value tuples, where each(featureID, categroy) tuple in sampleDistinctFeats is a key and the values are distinct integers
   rating from 0 to number of keys-1, then convert this RDD into a dictionary
"""
sampleOHEDict = (sampleDistictFeats
                .zipWithIndex().collectAsMap())
print sampleOHEDict


# In[ ]:

# TEST OHE Dictionary from distinct features (2b)
Test.assertEquals(sorted(sampleOHEDict.keys()),
                  [(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
                   (1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
                  'sampleOHEDict has unexpected keys')
Test.assertEquals(sorted(sampleOHEDict.values()), range(7), 'sampleOHEDict has unexpected values')


# In[ ]:

"""(2c) Automated creation of an OHE dictionary"""

def createOneHotDict(inputData):
    """Creates a one-hot-encoder dictionary based on the input data.

    Args:
        inputData (RDD of lists of (int, str)): An RDD of observations where each observation is
            made up of a list of (featureID, value) tuples.

    Returns:
        dict: A dictionary where the keys are (featureID, value) tuples and map to values that are
            unique integers.
    """
    return inputData.flatMap(lambda x:x).distinct(),zipWithIndex().collectAsMap()

sampleOHEDictAuto = createOneHotDict(sampleDataRDD)
print sampleOHEDictAuto


# In[ ]:

# TEST Automated creation of an OHE dictionary 
Test.assertEquals(sorted(sampleOHEDictAuto.keys()),
                  [(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
                   (1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
                  'sampleOHEDictAuto has unexpected keys')
Test.assertEquals(sorted(sampleOHEDictAuto.values()), range(7),
                  'sampleOHEDictAuto has unexpected values')


# In[ ]:

"""Part 3: Parse CTR data and generate OHE features"""

#Obtaining the data
import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('#', '#')
fileName = os.path.join(baseDir, inputPath)

if os.path.isfile(fileName):
    rawData = (sc.testFile(fileName, 2)
              .map(lambda x:x.replace('\t', ',')))  #work with either ',' or '\t' separated data
    print rawData.take(1)
    


# In[ ]:

"""(3a) Loading and splitting the data"""
weights = [.8, .1, .1]
seed =42

rawTrainData, rawValidationData, rawTestData = rawData.randomSplit(weights, seed = 42)

rawTrainData.cache()
rawValidationData.cache()
rawTestData.cache()

nTrain = rawTrainData.count()
nVal = rawValidationData.count()
nTest = rawTestData.count()

print nTrain, nVal, nTest, nTrain + nVal + nTest
print rawData.take(1)


# In[ ]:

"""(3b) Extract features"""
"""
   We will now parse the raw training data to create an RDD that we can subsequently use to create an OHE dictionary. Note that we will ignore the
   forst field(which is the 0-1 label), and parse the remaining fields
"""
def parsePoint(point):
    """Converts a comma separated string into a list of (featureID, value) tuples.

    Note:
        featureIDs should start at 0 and increase to the number of features - 1.

    Args:
        point (str): A comma separated string where the first value is the label and the rest
            are features.

    Returns:
        list: A list of (featureID, value) tuples.
    """
    # Drop the leading label field, then pair each remaining field with its 0-based position.
    return list(enumerate(point.split(',')[1:]))

# Parse every raw training record into its list of (featureID, value) tuples.
parsedTrainFeat = rawTrainData.map(parsePoint)
print parsedTrainFeat.take(1)

# Count distinct categories per featureID: flatten to individual tuples,
# deduplicate, tag each with a 1, and sum the tags per featureID.
numCategories = (parsedTrainFeat
                .flatMap(lambda x:x)
                .distinct()
                .map(lambda x:(x[0], 1))
                .reduceByKey(lambda x, y: x + y)
                .sortByKey()
                .collect())

# numCategories is sorted by featureID, so [2][1] is the category count for featureID 2.
print numCategories[2][1]


# In[ ]:

"""(3c) Create an OHE dictionary from the dataset"""

"""
   Note that parsePoint returns a data point as a list of (featureID, category) tuples. Using this obsetvation, create an OHE dictionary using
   the function implemented in Part (2c)
"""
# Build the OHE dictionary from the parsed training features via the Part (2c) helper.
ctrOHEDict = createOneHotDict(parsedTrainFeat)

# Total number of distinct (featureID, value) combinations seen in training.
numCtrOHEFeats = len(ctrOHEDict.keys())
print numCtrOHEFeats
# Empty-string categories (missing fields in the raw data) are valid keys.
print ctrOHEDict[(0,'')]


# In[ ]:

"""(3d) Apply OHE to the dataset"""

"""
   Now let's use this OHE dictionary by starting with the raw training data and creating an RDD of LabeledPoint objects using OHE features
"""
from pyspark.mllib.regression import LabeledPoint

def parseOHEPoint(point, OHEDict, numOHEFeats):
    """Obtain the label and feature vector for this raw observation.

    Note:
        You must use the function `oneHotEncoding` in this implementation or later portions
        of this lab may not function as expected.

    Args:
        point (str): A comma separated string where the first value is the label and the rest
            are features.
        OHEDict (dict of (int, str) to int): Mapping of (featureID, value) to unique integer.
        numOHEFeats (int): The number of unique features in the training dataset.

    Returns:
        LabeledPoint: Contains the label for the observation and the one-hot-encoding of the
            raw features based on the provided OHE dictionary.
    """
    rawLabel = point.split(',')[0]
    encodedFeats = oneHotEncoding(parsePoint(point), OHEDict, numOHEFeats)
    return LabeledPoint(rawLabel, encodedFeats)

# Encode every raw training record into a LabeledPoint with OHE features.
OHETrainData = rawTrainData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
OHETrainData.cache()
print OHETrainData.take(1)

#Check that oneHotEncoding function was used in parseOHEPoint
# Trick: temporarily rebind oneHotEncoding to None.  If parseOHEPoint really calls
# it, the call raises TypeError (None is not callable) and the flag flips to True.
backupOneHot = oneHotEncoding
oneHotEncoding = None
withOneHot = False
try: parseOHEPoint(rawTrainData.take(1)[0], ctrOHEDict, numCtrOHEFeats)
except TypeError: withOneHot = True
oneHotEncoding = backupOneHot  # restore the real function



# In[ ]:

# TEST Apply OHE to the dataset 
# The active-feature total over the first five points must agree between the parsed
# tuples and the encoded vectors' index arrays.
numNZ = sum(parsedTrainFeat.map(lambda x: len(x)).take(5))
numNZAlt = sum(OHETrainData.map(lambda lp: len(lp.features.indices)).take(5))
Test.assertEquals(numNZ, numNZAlt, 'incorrect implementation of parseOHEPoint')
Test.assertTrue(withOneHot, 'oneHotEncoding not present in parseOHEPoint')


# In[ ]:

"""Visualization 1: Feature frequency"""
def bucketFeatByCount(featCount):
    """Bucket a feature-occurrence count by powers of two, for plotting.

    Args:
        featCount (int): The number of times a feature occurred.

    Returns:
        int: The smallest power of two (up to 2**10) that is >= featCount,
            or -1 if featCount exceeds 2**10.
    """
    # BUG FIX: 'ranges' is not a builtin (NameError); the loop uses range.
    for i in range(11):
        size = 2**i
        if featCount <= size:
            return size
    return -1

featCounts = (OHETrainDta
             .flatMap(lambda lp: lp.features.indices)
             .map(lambda x :(x, 1))
             .reduceByKey(lambda x, y: x+y))

featCountsBuckets = (featCounts
                    .map(lambda x: (bucketFeatByCount(x[1], 1)))
                    .filter(lambda (k, v): k != -1)
                    .reduceByKey(lambda x, y: x + y)
                    .collect())

print featCountsBuckets

import matplotlib.pyplot as plt

x, y = zip(*featCountsBuckets)
x, y = np.log(x), np.log(y)

def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999', gridWidth=1.0):
    """Create a matplotlib figure/axes pair with the lab's standard styling
    (grey tick labels, fixed tick positions, light grid, no axis spines)."""
    plt.close()
    fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white')
    ax.axes.tick_params(labelcolor='#999999', labelsize='10')
    for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]:
        axis.set_ticks_position('none')
        axis.set_ticks(ticks)
        axis.label.set_color('#999999')
        if hideLabels: axis.set_ticklabels([])
    plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-')
    # NOTE(review): map() is eager only on Python 2; on Python 3 this line would be a
    # no-op and the spines would stay visible -- confirm before porting.
    map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right'])
    return fig, ax
# generate layout and plot data
fig, ax = preparePlot(np.arange(0, 10, 1), np.arange(4, 14, 2))
ax.set_xlabel(r'$\log_e(bucketSize)$'), ax.set_ylabel(r'$\log_e(countInBucket)$')
plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75)
pass


# In[ ]:

"""(3e) Handling unseen features"""
"""
   As some categorical values will likely appear in new data that did not exist in the training data. To deal with this situation, update the 
   oneHotEncoding() function from Patr(1d) to ignore previously unseen categories, and then compute OHE features for the validation data
"""
def oneHotEncoding(rawFeats, OHEDict, numOHEFeats):
    """Produce a one-hot-encoding from a list of features and an OHE dictionary.

    Note:
        If a (featureID, value) tuple doesn't have a corresponding key in OHEDict it should be
        ignored.

    Args:
        rawFeats (list of (int, str)): The features corresponding to a single observation.  Each
            feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne)
        OHEDict (dict): A mapping of (featureID, value) to unique integer.
        numOHEFeats (int): The total number of unique OHE features (combinations of featureID and
            value).

    Returns:
        SparseVector: A SparseVector of length numOHEFeats with indicies equal to the unique
            identifiers for the (featureID, value) combinations that occur in the observation and
            with values equal to 1.0.
    """
    dictIndices = [OHEDict[feat] for feat in rawFeats if feat in OHEDict]
    dictIndices.sort()
    val = [1 for i in range(len(dictIndices))]
    return SparseVector(numOHEFeats, dictIndices, val)

OHEValidationData = rawValidationData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
OHEValidationData.cache()
print OHEValidationData.take(1)


# In[ ]:

"""Part 4: CTR prediction and logloss evaluation"""
"""(4a) Logistic regression"""

"""
   We are now ready to train our first CTR classifier. A natural classifier to use in this setting is logistic regression, since it models the 
   probability of a click-through event rather than returning a binary response, and when working with rare events, probabilistic predictions 
   are useful. First use LogisticRegressionWithSGD to train a model using OHETrainData with the given hyperparameter configuration.  
   LogisticRegressionWithSGD returns a LogisticRegressionModel. Next, use the LogisticRegressionModel.weights and 
   LogisticRegressionModel.intercept attributes to print out the model's parameters. Note that these are the names of the object's attributes 
   and should be called using a syntax like model.weights for a given model.
"""
from pyspark.mllib.classification import LogisticRegressionWithSGD

#fixed hyperparameters
numIters = 50
stepSize = 10.
regParam = 1e-6
regType = '12'
includeIntercept = True

model0 = LogisticRegressionWithSGD.train(OHETrainData, iterations = 50, step = 10., regParam = 1e-6, regType = '12', intercept = True)
sortedWeights = sorted(model0.weights)
print sortedWeights[:5], model0.intercept


# In[ ]:

"""(4b) Log Loss""" 
"""
   we will use log loss to evaluate the quality of models. Log loss is defined as:
              ℓlog(p,y)=−log(p) if y=1,ℓlog(p,y)=−log(1−p) if y=0
   where p is a probability between 0 and 1 and y is a label of either 0 or 1.
"""
from math import log

def computeLogLoss(p, y):
    """Return the log loss for a single predicted probability and true label.

    Note:
        log(0) is undefined, so a tiny epsilon is added to the probability (and to
        its complement) before taking the logarithm.

    Args:
        p (float): A probability between 0 and 1.
        y (int): A label.  Takes on the values 0 and 1.

    Returns:
        float: The log loss value.
    """
    epsilon = 10e-12
    lossIfPositive = -log(p + epsilon)       # contribution when y == 1
    lossIfNegative = -log(1 - p + epsilon)   # contribution when y == 0
    return y * lossIfPositive + (1 - y) * lossIfNegative


# In[ ]:

"""(4c) Baseline log loss"""
"""
   Next we will use the function we wrote in Part (4b) to compute the baseline log loss on the training data. A very simple yet natural baseline
   model is one where we always make the same prediction independent of the given datapoint, setting the predicted value equal to the fraction 
   of training points that correspond to click-through events (i.e., where the label is one). Compute this value (which is simply the mean of the
   training labels), and then use it to compute the training log loss for the baseline model. The log loss for multiple observations is the mean
   of the individual log loss values.
"""

#Note that I wonder the code in this part can be optimizalized, but I do not figuare out 

classOneFracTrain = (OHETrainData.map(lambda x: x.label).reduce(lambda x, y: x+y))/(OHETrainData.map(lambda x: x.label).count())
print classOneFracTrain

logLossTrBase = (OHETrainData.map(lambda x:computeLogLoss(classOneFracTrain, x.label)).reduce(lambda x, y: x+y))/(OHETrainData.map(lambda x:x.label).count())
print 'Baseline Train Logloss = {0:.3f}\n'.format(logLossTrBase)


# In[ ]:

"""(4d) Predicted probability"""

from math import exp

def getP(x, w, intercept):
    """Calculate the probability for an observation given a set of weights and intercept.

    Note:
        The raw prediction is clamped to the interval [-20, 20] for numerical
        stability before the sigmoid is applied.

    Args:
        x (SparseVector): A vector with values of 1.0 for features that exist in this
            observation and 0.0 otherwise.
        w (DenseVector): A vector of weights (betas) for the model.
        intercept (float): The model's intercept.

    Returns:
        float: A probability between 0 and 1.
    """
    rawPrediction = w.dot(x) + intercept
    if rawPrediction > 20:
        rawPrediction = 20
    elif rawPrediction < -20:
        rawPrediction = -20
    return 1/(1 + exp(-rawPrediction))

# Predicted click probabilities for the training points under model0.
trainingPredictions = OHETrainData.map(lambda x:getP(x.features, model0.weights, model0.intercept))
print trainingPredictions.take(5)


# In[ ]:

"""(4e) Evaluate the model"""

def evaluateResults(model, data):
    """Calculates the log loss for the data given the model.

    Args:
        model (LogisticRegressionModel): A trained logistic regression model.
        data (RDD of LabeledPoint): Labels and features for each observation.

    Returns:
        float: Log loss for the data.
    """
    # Per-point losses: predict with the model, score each prediction against its label.
    perPointLoss = data.map(
        lambda lp: computeLogLoss(getP(lp.features, model.weights, model.intercept), lp.label))
    return perPointLoss.reduce(lambda a, b: a + b) / data.count()

logLossTrLRO = evaluateResults(model0, OHETrainData)
print('OHE Features Train Logloss:\n\tBaseline = {0:3f}\n\tLogReg = {1:.3f}'.format(logLossTrBase, logLossTrLRO))


# In[ ]:

"""(4f) Validation log loss"""
"""
   To compute the validation log loss for both the baseline and logistic regression models
"""
#Note that you'd better write a general function for Baseline model in Parts(4c) and it seems there are optimizations for logLossValBase
# Baseline on the validation split: always predict the validation click-through rate
# (the mean label).  Simplified from the original reduce()/count() pairs, which
# rebuilt the label RDD three times; mean() yields the identical value in one pass.
classOneFracVal = OHEValidationData.map(lambda lp: lp.label).mean()

logLossValBase = OHEValidationData.map(lambda lp: computeLogLoss(classOneFracVal, lp.label)).mean()

# Validation log loss for the trained logistic regression model.
logLossValLRO = evaluateResults(model0, OHEValidationData)


# In[ ]:

"""Visualization 2:ROC curve"""

"""
   We will now visualize how well the model predicts our target. To do this we generate a plot of the ROC curve. The ROC curve shows us the 
   trade-off between the false positive rate and true positive rate, as we liberalize the threshold required to predict a positive outcome. 
   A random model is represented by the dashed line.
"""
labelsAndScores = OHEValidationData.map(lambda lp:
                                            (lp.label, getP(lp.features, model0.weights, model0.intercept)))
labelsAndWeights = labelsAndScores.collect()
labelsAndWeights.sort(key=lambda (k, v): v, reverse=True)
labelsByWeight = np.array([k for (k, v) in labelsAndWeights])

length = labelsByWeight.size
truePositives = labelsByWeight.cumsum()
numPositive = truePositives[-1]
falsePositives = np.arange(1.0, length + 1, 1.) - truePositives

truePositiveRate = truePositives / numPositive
falsePositiveRate = falsePositives / (length - numPositive)

# Generate layout and plot data
fig, ax = preparePlot(np.arange(0., 1.1, 0.1), np.arange(0., 1.1, 0.1))
ax.set_xlim(-.05, 1.05), ax.set_ylim(-.05, 1.05)
ax.set_ylabel('True Positive Rate (Sensitivity)')
ax.set_xlabel('False Positive Rate (1 - Specificity)')
plt.plot(falsePositiveRate, truePositiveRate, color='#8cbfd0', linestyle='-', linewidth=3.)
plt.plot((0., 1.), (0., 1.), linestyle='--', color='#d6ebf2', linewidth=2.)  # Baseline model
pass


# In[ ]:

"""Part 5: Reduce feature dimension via feature hashing"""

"""(5a) Hash function"""
"""
   Below is the hash function and we will first use this hash function with the three sample data points from Part(1a) to gain some intuition
"""
from collections import defaultdict
import hashlib
 
def hashFunction(numBuckets, rawFeats, printMapping=False):
    """Calculate a feature dictionary for an observation's features based on hashing.

    Note:
        Use printMapping=True for debug purposes and to better understand how the hashing works.

    Args:
        numBuckets (int): Number of buckets to use as features.
        rawFeats (list of (int, str)): A list of features for an observation.  Represented as
            (featureID, value) tuples.
        printMapping (bool, optional): If true, the mappings of featureString to index will be
            printed.

    Returns:
        dict of int to float:  The keys will be integers which represent the buckets that the
            features have been hashed to.  The value for a given key will contain the count of the
            (featureID, value) tuples that have hashed to that key.
    """
    mapping = {}
    for ind, category in rawFeats:
        featureString = category + str(ind)
        mapping[featureString] = int(int(hashlib.md5(featureString).hexdigest(), 16) % numBuckets)
    if(printMapping): print mapping
    sparseFeatures = defaultdict(float)
    for bucket in mapping.values():
        sparseFeatures[bucket] += 1.0
    return dict(sparseFeatures)

# Use four buckets -- small enough that hash collisions are visible
sampOneFourBuckets = hashFunction(4, sampleOne, True)
sampTwoFourBuckets = hashFunction(4, sampleTwo, True)
sampThreeFourBuckets = hashFunction(4, sampleThree, True)

# Use one hundred buckets -- collisions become unlikely
sampOneHundredBuckets = hashFunction(100, sampleOne, True)
sampTwoHundredBuckets = hashFunction(100, sampleTwo, True)
sampThreeHundredBuckets = hashFunction(100, sampleThree, True)

print '\t\t 4 Buckets \t\t\t 100 Buckets'
print 'SampleOne:\t {0}\t\t {1}'.format(sampOneFourBuckets, sampOneHundredBuckets)
print 'SampleTwo:\t {0}\t\t {1}'.format(sampTwoFourBuckets, sampTwoHundredBuckets)
print 'SampleThree:\t {0}\t {1}'.format(sampThreeFourBuckets, sampThreeHundredBuckets)


# In[ ]:

"""(5b) Creating hashed features"""

def parseHashPoint(point, numBuckets):
    """Create a LabeledPoint for this observation using hashing.

    Args:
        point (str): A comma separated string where the first value is the label and the rest are
            features.
        numBuckets: The number of buckets to hash to.

    Returns:
        LabeledPoint: A LabeledPoint with a label (0.0 or 1.0) and a SparseVector of hashed
            features.
    """
    label = point.split(',')[0]
    parsedFeat = parsePoint(point)
    hashDict = hashFunction(numBuckets, parsedFeat, True)
    return LabeledPoint(label, features)

numBucketsCTR = 2**15
hashTrainData = rawTrainData.map(lambda x:parseHashPoint(x, numBucketsCTR))
hashTrainData.cache()
hashValidationData = rawValidationData.map(lambda x:parseHashPoint(x, numBucketsCTR))
hashValidationData.cache()
hashTestData = rawTestData.map(lambda x:parseHashPoint(x, numBucketsCTR))
hashTestData.cache()

print hashTrainData.take(1)


# In[ ]:

"""(5c) Sparsity"""

def computeSparsity(data, d, n):
    """Calculates the average sparsity for the features in an RDD of LabeledPoints.

    Args:
        data (RDD of LabeledPoint): The LabeledPoints to use in the sparsity calculation.
        d (int): The total number of features.
        n (int): The number of observations in the RDD.

    Returns:
        float: The average of the ratio of features in a point to total features.
    """
    return data.map(lambda x:float(len(x.features.values))).sum()/d/n

averageSparsityHash = computeSparsity(hashTrainData, numBucketsCTR, nTrain)
averageSparsityOHE = computeSparsity(OHETrainData, numCtrOHEFeats, nTrain)

print 'Average OHE Sparsity: {0:.7e}'.format(averageSparsityOHE)
print 'Average Hash Sparsity: {0:.7e}'.format(averageSparsityHash)


# In[ ]:

"""(5d) Logistic model with hashed features"""

"""
   Now let's train a logistic regression model using the hashed features. Run a grid search to find suitable hyperparameters for the hashed 
   features, evaluating via log loss on the validation data. Note: This may take a few minutes to run. Use 1 and 10 for stepSizes and 1e-6 
   and 1e-3 for regParams.
"""
numIters = 500
regType = 'l2'
includeIntercept = True

# Initialize variables using values from initial model training
bestModel = None
bestLogLoss = 1e10

# BUG FIX: the lab instructions above call for step sizes of 1 and 10; range(1, 11)
# would instead train 20 models (step sizes 1 through 10) at several minutes each.
stepSizes = [1, 10]
regParams = [1e-6, 1e-3]
# Grid search: keep whichever (stepSize, regParam) pair minimizes validation log loss.
for stepSize in stepSizes:
    for regParam in regParams:
        model = (LogisticRegressionWithSGD
                .train(hashTrainData, numIters, stepSize, regParam=regParam, regType=regType, intercept=includeIntercept))
        logLossVa = evaluateResults(model, hashValidationData)
        print ('\tstepSize = {0:.1f}, regParam = {1:.0e}: logloss = {2:.3f}'
               .format(stepSize, regParam, logLossVa))
        if logLossVa < bestLogLoss:
            bestModel = model
            bestLogLoss = logLossVa
print ('Hashed Features Validation Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
       .format(logLossValBase, bestLogLoss))


# In[ ]:

"""Visualization 3: Hyperpatameter heat map"""

from matplotlib.colors import LinearSegmentedColormap

# Saved parameters and results.  Eliminate the time required to run 36 models
stepSizes = [3, 6, 9, 12, 15, 18]
regParams = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
# Pre-computed validation log loss: rows follow stepSizes, columns follow regParams.
logLoss = np.array([[ 0.45808431,  0.45808493,  0.45809113,  0.45815333,  0.45879221,  0.46556321],
                    [ 0.45188196,  0.45188306,  0.4518941,   0.4520051,   0.45316284,  0.46396068],
                    [ 0.44886478,  0.44886613,  0.44887974,  0.44902096,  0.4505614,   0.46371153],
                    [ 0.44706645,  0.4470698,   0.44708102,  0.44724251,  0.44905525,  0.46366507],
                    [ 0.44588848,  0.44589365,  0.44590568,  0.44606631,  0.44807106,  0.46365589],
                    [ 0.44508948,  0.44509474,  0.44510274,  0.44525007,  0.44738317,  0.46365405]])

numRows, numCols = len(stepSizes), len(regParams)
logLoss = np.array(logLoss)
logLoss.shape = (numRows, numCols)

fig, ax = preparePlot(np.arange(0, numCols, 1), np.arange(0, numRows, 1), figsize=(8, 7),
                      hideLabels=True, gridWidth=0.)
ax.set_xticklabels(regParams), ax.set_yticklabels(stepSizes)
ax.set_xlabel('Regularization Parameter'), ax.set_ylabel('Step Size')

# Darker cells correspond to lower (better) log loss; gamma=.2 stretches contrast
# at the low end of the color ramp.
colors = LinearSegmentedColormap.from_list('blue', ['#0022ff', '#000055'], gamma=.2)
image = plt.imshow(logLoss,interpolation='nearest', aspect='auto',
                    cmap = colors)
pass


# In[ ]:

"""(5e) Evaluate on the test set"""

#Log loss for the best model from
logLossTest = evaluateResults(bestModel, hashTestData)

#Log loss for the baseline model
OHETestData = rawTestData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
classOneFracTest = (OHETestData.map(lambda x:x.label).reduce(lambda x,y:x+y))/(OHETestData.map(lambda x:x.label).count())


logLossTestBaseline = (OHETestData.map(lambda x:computeLogLoss(classOneFracTest, x.label)).reduce(lambda x,y:x+y))/(OHETestData.map(lambda x:x.label).count())
#logLossTestBaseline = evaluateResults(model0, OHETestData)

print ('Hashed Features Test Log Loss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
       .format(logLossTestBaseline, logLossTest))

