
#non-weighted voting (where x is a matrix of booleans)
# Each column of x is one voter's TRUE/FALSE ballot over the rows.
# Returns a logical vector with TRUE at the (first) row that received
# the most votes. A single-column x is returned unchanged, as before.
vote <- function(x) {
  if (NCOL(x) == 1) {
    return(x)
  }
  # Vectorized tally: row sums of a logical matrix count the TRUE votes,
  # replacing the per-column indexing loop.
  tally <- rowSums(x)
  # which.max() takes the first maximum, so ties break toward lower rows
  # exactly as the original loop-based version did.
  which.max(tally) == seq_len(NROW(x))
}

# NOTE: added an argument "e" here since e is no longer a global var
# Curried Euclidean distance in a projected space: dist(x, e) returns a
# one-argument function computing the distance from x to its input,
# after both are mapped through project().
# project() is defined elsewhere in the project; `e` is presumably the
# projection basis (eigenvectors?) formerly held in a global -- TODO confirm.
# NOTE(review): this definition is immediately shadowed by the plain
# two-argument-free `dist` defined just below, so as written it is dead code.
dist <- function(x, e) { #euclidean distance between x and y (curried)
  z <- project(x,e)
  function(y) { sqrt(sum((z-project(y,e))^2))}
}
# Curried Euclidean distance: dist(x) returns a one-argument function
# that measures the Euclidean distance from the fixed point x.
# (Shadows both the projected variant above and base stats::dist.)
dist <- function(x) {
  function(y) {
    delta <- x - y
    sqrt(sum(delta * delta))
  }
}
getNN <- function(data,dist) getNNs(data,1,dist)

#return the k closest points in data to x, as measured by dist
# `data` holds one candidate point per COLUMN; `dist` is a one-argument
# distance function (e.g. a closure from dist(x) above).
# Returns a logical mask over the columns of `data`; ties at the k-th
# distance are all included, matching the original `<= thresh` behavior.
getNNs <- function(data, k, dist) {
  # BUG FIX: the result buffer was previously sized rep(0, NROW(data))
  # while being filled over 1:NCOL(data). When NCOL < NROW the leftover
  # zeros corrupted sort(d)[k] and the mask had the wrong length (when
  # NCOL > NROW, R silently extended the vector, hiding the bug).
  d <- vapply(seq_len(NCOL(data)), function(i) dist(data[, i]), numeric(1))
  thresh <- sort(d)[k]
  d <= thresh
}


#learner and predictor for cross validation harness (and general usefulness)
#learn:(trnx,trny,param) -> model and predict:(testx,model) -> prediction
# kNN is a lazy learner: "training" just packages the data and k.
learnkNN <- function(trnx, trny, param) {
  print(paste("----------------learning kNN", param))
  model <- list(x = trnx, y = trny, k = param)
  print("learned")
  model
}

# requires: model has elements "x" (training points, one per column),
# "y" (labels, one column per training point) and "k" -- i.e. the list
# produced by learnkNN.
# NOTE(review): an older comment here required model$eigenVecs, but
# nothing in this function uses it -- confirm before relying on that.
predictkNN <- function(testx, model) {
  measure <- dist(testx)
  # Logical column-mask over the training points nearest to testx.
  nns <- as.matrix(getNNs(model$x, model$k, measure))
  # Majority vote among the neighbours' label columns.
  vote(model$y[, nns])
}
