
source("binary_perceptron.R")
source("oneVersusAll.R")
###########################
# One Versus All PERCEPTRON
###########################

# NOTE: this variable depends on data from the startup script
# this variable is being used in both learn and predict
# a |people| by |TrainingData| matrix storing the binary label vectors (used for multiclass perceptron)
#OVA_Labels = getIndividualBinaryLabels(matrix(people), TrainingData)
# adapting the matrix for binary_perceptron.perceptron use
#OVA_Labels = ifelse(OVA_Labels, 1, -1)

# Trains a one-versus-all multiclass perceptron model.
# Params:
#   trainX: a matrix whose column vectors are the feature vectors
#   trainY: a boolean matrix generated by getIndividualBinaryLabels
#   param: the degree of the polynomial kernel
#   k: the number of top eigenvectors kept (dimension of the eigenspace);
#      defaults to the previously hard-coded value of 110
# Returns: a list L, where
#   L$mistakeMatrix = the mistake-indices matrix (one row per classifier)
#   L$projectedTrainX = a matrix whose row vectors are the feature vectors in the eigenspace
#   L$kernelDeg = the degree of the polynomial kernel in the model
#   L$eigenVecs = the top k eigenvectors (row-stacked)
#   L$OVA_Labs = the one-versus-all binary labels (+1/-1)
#   L$trainingMean = the mean of the column vectors in trainX
learnOVA <- function(trainX, trainY, param, k = 110) {
  # Mean-center the columns of trainX: subtracting a vector from a matrix
  # recycles down the columns, so each column has trMean removed.
  trMean <- rowMeans(trainX)
  trainX <- trainX - trMean

  result <- learnHelperFuncOVA(trainX, trainY, k = k, kernDegree = param)
  result$trainingMean <- trMean

  result
}
# Debug scratch matrix (20 x 40, zero-filled). NOTE(review): nothing in this
# file reads it — presumably left over from interactive debugging; confirm
# before removing. `byrow` dropped: it is a no-op when filling with a scalar.
M_debug <- matrix(0, nrow = 20, ncol = 40)
# Trains one binary kernel perceptron per class (per row of trainY) on the
# eigenspace projection of the training data.
# Params:
#   trainX: matrix whose COLUMN vectors are the (mean-centered) feature vectors
#   trainY: boolean matrix; row i marks which training columns belong to class i
#   k: number of top eigenvectors to project onto
#   kernDegree: degree of the polynomial kernel
# Returns: a list with
#   mistakeMatrix   = matrix whose i-th row indicates the mistake points of the
#                     i-th perceptron classifier (R_{ij} != 0 iff point j is a
#                     mistake point for classifier i)
#   projectedTrainX = matrix whose i-th row is the projection of the i-th
#                     feature vector onto the eigenspace
#   kernelDeg       = kernDegree (echoed back for predictOVA)
#   eigenVecs       = the top k eigenvectors returned by getEigenVect
#   OVA_Labs        = the one-versus-all +1/-1 label matrix
learnHelperFuncOVA <- function(trainX, trainY, k = 10, kernDegree = 3) {
  print(sprintf("OVA: Learning a model with k = %d and degree of polynomial kernel = %d...", k, kernDegree))

  # Basis functions: top-k eigenvectors of the Gram matrix X X^T.
  e <- getEigenVect(trainX %*% t(trainX), k)

  # Recode the boolean labels as +1/-1, the coding perceptron() expects.
  # (Vectorized; replaces the old element-by-element double loop.)
  OVA_Labels <- ifelse(trainY, 1, -1)

  print("OVA: Projecting the training set onto the eigenspace...")

  # Row i = k-dimensional projection of the i-th mean-centered feature vector.
  # Preallocated instead of growing with rbind() inside the loop.
  numPoints <- NCOL(trainX)
  projectedDataMatrix <- matrix(0, numPoints, k)
  for (i in seq_len(numPoints)) {
    projectedDataMatrix[i, ] <- project(trainX[, i], e)
  }

  # One binary classifier per class; row i records the mistake points made by
  # the perceptron trained to separate class i from everyone else.
  # Bounded by NROW(OVA_Labels) rather than the global `people` so this
  # function depends only on its arguments.
  numClassifiers <- NROW(OVA_Labels)
  mistakeIndicesMatrix <- matrix(0, numClassifiers, numPoints)
  for (i in seq_len(numClassifiers)) {
    mistakeIndicesMatrix[i, ] <-
      perceptron(OVA_Labels[i, ], projectedDataMatrix, getPolyKernel(kernDegree))
  }

  list(mistakeMatrix = mistakeIndicesMatrix,
       projectedTrainX = projectedDataMatrix,
       kernelDeg = kernDegree,
       eigenVecs = e,
       OVA_Labs = OVA_Labels)
}

# Classifies one test point by scoring it against every one-versus-all
# perceptron and picking the classifier with the highest score.
# Params:
#   testX: a single feature vector of the test point
#   model: the list returned by learnOVA
# Returns: a logical vector of length NROW(model$OVA_Labs) with exactly one
#   TRUE, marking the predicted class index (NOT a single number — callers
#   index it with which()/which.max-style logic).
predictOVA <- function(testX, model) {
  mistakeIndicesMatrix <- model$mistakeMatrix
  projectedDataMatrix <- model$projectedTrainX
  kernDegree <- model$kernelDeg
  e <- model$eigenVecs
  OVA_Labels <- model$OVA_Labs
  trMean <- model$trainingMean

  # Hoisted out of the loop: the kernel closure is invariant per prediction.
  kern <- getPolyKernel(kernDegree)
  numClasses <- NROW(OVA_Labels)

  # predictionScores[i] = classification score of classifier i
  # (how far the test point lies from its oriented hyperplane)
  predictionScores <- rep(0, numClasses)

  # Projection of the mean-centered test feature vector onto the eigenspace.
  projectedTestDatum <- project(testX - trMean, e)

  for (i in seq_len(numClasses)) {
    # Logical mask over training points: TRUE where classifier i erred.
    mistakeIndices <- mistakeIndicesMatrix[i, ] == 1

    # Labels (+1/-1) of all the mistake points of classifier i.
    classifierMistakeLabels <- (OVA_Labels[i, ])[mistakeIndices]

    # Features of the mistake points. drop = FALSE keeps this a matrix even
    # when there is exactly ONE mistake point — BUG FIX: the old code let it
    # collapse to a vector, and its guard tested length(mistakeIndices)
    # (always the training-set size), so apply() crashed on single-mistake
    # classifiers.
    classifierMistakeFeatures <-
      projectedDataMatrix[mistakeIndices, , drop = FALSE]

    # pre_kern[j] = y^j * K(phi(x^j), phi(x^t)) where
    #   y^j = classifierMistakeLabels[j],
    #   x^j = the j-th row of classifierMistakeFeatures,
    #   K   = the polynomial kernel (implicit feature map phi).
    # Summing the components gives the classification score. A classifier
    # with no mistake points keeps its score at 0.
    if (NROW(classifierMistakeFeatures) > 0) {
      pre_kern <- classifierMistakeLabels *
        apply(classifierMistakeFeatures, 1, kern, v = projectedTestDatum)
      predictionScores[i] <- sum(pre_kern)
    }
  }

  # The classification of this test point (which.max breaks ties by taking
  # the first maximum).
  classifiedIndex <- which.max(predictionScores)

  result <- rep(FALSE, numClasses)
  result[classifiedIndex] <- TRUE
  result
}

###############################
# IMPLEMENTATION Test
###############################
# correct = 0              
# model <- learnOVA(A, c(1:42), 10)
# #debug
# correctlyClassified <- list()
# for (testDatum in TestData){
#  
#   classifiedIndex = predictOVA(getVector(testDatum), model)
#   # grab the name of the person in testDatum
#   testPersonName = gsub(".*faces/(.*)/.*", "\\1", testDatum)
#   
#   # if correctly recognized the individual, add the recognition score
#   if (people[classifiedIndex] == testPersonName){
#     print(sprintf("Correctly classified Test image %s", testDatum))
#     correct = correct + 1
#     #debug
#     correctlyClassified[[correct]] = testDatum
#   } else {
#     print(sprintf("Misclassified %s as %s", testPersonName, people[classifiedIndex]))
#     print(sprintf("Test image path: %s", testDatum))
#     
#   }
# }
# print(paste("Recognition Rate =", correct/length(TestData)))


#people <- c("ben", "jerry", "cameron")
#TrainingData <- c("faces/cameron/f1", "faces/jerry/f1", "faces/ben/f1", "faces/cameron/f2")
#print(getIndividualBinaryLabels(matrix(people), TrainingData))


# Original code
# for (k in kays_test){
#   e <- getEigenVect(A %*% t(A),k) #get our basis functions
#   
#   # each row R_i will be a boolean vector that corresponds to the mistake point
#   # made by the i-th perceptron classifier (i.e. R_{ij} = TRUE iff j is a mistake point
#   # for classifier i)   
#   mistakeIndicesMatrix = matrix(numeric(0), 0, length(TrainingData))
#   
#   # each row R_i will be a k-dimensional vector representing the projection of
#   # the i-th mean-centered feature vector to the eigenspace
#   projectedDataMatrix = matrix(numeric(0), 0, k)
#   for (i in 1:length(TrainingData)){
#     projectedDataMatrix = rbind(projectedDataMatrix, project(A[,i], e))
#   }
#   
#   deg = 3
#   #perceptron(OVA_Labels[1], projectedDataMatrix, poly_kernel.deg)
#   for (i in 1:length(people)){    
#     # train a perceptron classifier with polynomial kernel of degree 3
#     # and record the mistake index vector
#     mistakeIndicesMatrix = rbind(mistakeIndicesMatrix, (perceptron(OVA_Labels[i,], projectedDataMatrix, poly_kernel.deg)))
#     
#     #print(paste("mistakePoints for perceptron wrt person:", people[i]))
#     #print(TrainingData[ifelse(mistakeIndicesMatrix[i,], TRUE, FALSE)])
#   }
#   
#   correct = 0                         
#   for (testDatum in TestData){
#     # the i-th component will store the classification score (how far a data point is from the hyperplane)
#     # of the i-th perceptron classifier
#     predictionScores = rep(0, length(people))
#     
#     # the projection of the mean-centered feature vector of the test data point
#     projectedTestDatum = project(getVector(testDatum) - trainingMean, e)
#     
#     for (i in 1:length(people)){      
#       mistakeIndices = mistakeIndicesMatrix[i,]
#       
#       # a vector of the labels of all the mistake points
#       classifierMistakeLabels = (OVA_Labels[i,])[mistakeIndices]
#       
#       # a matrix of the features of all the mistake points
#       classifierMistakeFeatures = projectedDataMatrix[mistakeIndices,]
#       
#       # a vector whose dimension is equal to size of the current mistake set      
#       # The j-th component of pre_kern is equal to y^j * \phi(x^j) \cdot \phi(x^t)
#       # where y^t = classifierMistakeLabels[t], 
#       #       x^t = the t-th row of classifierMistakeFeatures
#       #       phi = the feature transformation function
#       # the sum of all the components gives the classification score of this data point
#       if (length(mistakeIndices) > 1) {
#         pre_kern = classifierMistakeLabels * apply(classifierMistakeFeatures, 1, poly_kernel.deg, v=projectedTestDatum)
#       } else {
#         pre_kern = classifierMistakeLabels * poly_kernel.deg(classifierMistakeFeatures, projectedTestDatum)
#       }
#       
#       predictionScores[i] = sum(pre_kern)
#     }
#     
#     # the classification of this test data point
#     classifiedIndex = which.max(predictionScores)
#     # grab the name of the person in testDatum
#     testPersonName = gsub(".*faces/(.*)/.*", "\\1", testDatum)
#     
#     # if correctly recognized the individual, add the recognition score
#     if (people[classifiedIndex] == testPersonName){
#       print(sprintf("Correctly classified Test image %s", testDatum))
#       correct = correct + 1
#     } else {
#       #print(sprintf("Misclassified %s as %s", testPersonName, people[classifiedIndex]))
#       #print(sprintf("Test image path: %s", testDatum))
#     }
#   }
#   recognitionRates[k] = correct / length(TestData)
# }


# This function is obsolete (see oneVersusAll.R)
# Params:
#   people: a list of individual names
#   TrainingData: a set of file names of images
# Returns:
#   A matrix of row vectors. For each vector v_i, its j-th component 
#     v_{ij} = 1  if the j-th file in TrainingData is the face of individual i, 
#            = -1 if the j-th file in TrainingData is NOT the face of individual i

# getIndividualBinaryLabels <- function(people, TrainingData){
#   numPpl = length(people)
#   numTrainData = length(TrainingData)
#   
#   result = matrix(0, numPpl, numTrainData)
#   
#   for (i in 1:numPpl){    
#     for (j in 1:numTrainData){
#       # grab the name of the person in TrainingData[j]
#       dataName = gsub(".*faces/(.*)/.*", "\\1", TrainingData[j])
#       
#       if(dataName == people[i]){
#         result[i, j] = 1
#       } else {
#         result[i, j] = -1
#       }
#     }    
#   }
#   
#   return (result)
# }
