# Spam classification walkthrough on the kernlab `spam` data set.
# Install the kernlab package first if it is not already available.
library(kernlab)
data(spam)
dim(spam)
# Data set help: http://rss.acs.unt.edu/Rdoc/library/kernlab/html/spam.html
# Subsampling the data set:
# we need to generate a training and a test set for prediction

split_train_test <- function(data_to_split) {
  # Split a data.frame into reproducible train/test halves.
  #
  # Each row is assigned to the training set by an independent
  # Bernoulli(0.5) draw, so the two subsets are disjoint and together
  # cover every row of the input.
  #
  # Args:
  #   data_to_split: a data.frame; NULL or non-data.frame input is an error.
  # Returns:
  #   A list with elements "train" and "test" (both data.frames).
  #
  # Use short-circuit `||` for the scalar condition (the vectorized `|`
  # was an idiom bug; since R 4.3, `||` also errors on length > 1 input).
  if (is.null(data_to_split) || !is.data.frame(data_to_split)) {
    stop("split_train_test ha recibido NULL o no es un data.frame")
  }
  # Fixed seed so the same split is produced on every call.
  set.seed(3435)
  train_indicator <- rbinom(nrow(data_to_split), size = 1, prob = 0.5)
  train <- data_to_split[train_indicator == 1, ]
  test <- data_to_split[train_indicator == 0, ]
  list(train = train, test = test)
}

# Partition the spam data and print a quick exploratory report of the
# training half (dimensions, column names, first rows, class balance,
# per-column summaries).
split <- split_train_test(spam)
train <- split$train
test <- split$test

# Print a section banner followed by its value, matching the original
# report layout exactly.
show_section <- function(title, value) {
  print(sprintf("##########     %s     ##########", title))
  print(value)
}

show_section("DIM", dim(train))
show_section("NAMES", names(train))
show_section("HEAD", head(train))
show_section("TABLE", table(train$type))
show_section("SUMMARY", summary(train))

# Exploratory plots of the training set: distribution of capitalAve by class.
plot(train$capitalAve ~ train$type)
# Log-transforming skewed variables is common in exploratory analysis;
# the +1 avoids taking log10 of zero values.
plot(log10(train$capitalAve + 1) ~ train$type)
plot(log10(train[, 1:4] + 1))
# Hierarchical clustering of the 57 predictor columns.
hCluster = hclust(dist(t(train[, 1:57])))
plot(hCluster)
# Redo on the log10 scale for a clearer dendrogram; columns 56 and 57 are
# dropped because they are raw counts, not percentages.
hClusterUpdated = hclust(dist(t(log10(train[, 1:55] + 1))))
plot(hClusterUpdated)




# Prediction step: find the single predictor whose univariate logistic
# model has the lowest cross-validated misclassification error.
# 0/1 numeric coding of the `type` factor (its first level maps to 0,
# the second to 1 — presumably nonspam/spam; confirm with levels(train$type)).
train$numType <- as.numeric(train$type) - 1
# Misclassification cost for cv.glm: number of cases where thresholding
# the predicted probability at 0.5 disagrees with the observed label.
costFunction <- function(x, y) {
  sum(x != (y > 0.5))
}
cvError <- rep(NA, 55)
library(boot)
# 2-fold CV error of a one-predictor logistic fit for each of the first
# 55 columns (the percentage-based predictors).
for (i in seq_len(55)) {
  lmFormula <- as.formula(paste0("numType~", names(train)[i]))
  glmFit <- glm(lmFormula, family = "binomial", data = train)
  # delta[2] is the bias-adjusted cross-validation estimate.
  cvError[i] <- cv.glm(train, glmFit, costFunction, 2)$delta[2]
}
# Index and name of the best single predictor.
min.error <- which.min(cvError)
names(train)[min.error]

# Compare the selected model against the held-out TEST data.
predictionModel <- glm(numType ~ charDollar, family = "binomial", data = train)
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
# type = "response" yields probabilities; the default "link" scale would
# return log-odds, for which a 0.5 cutoff is wrong.
predictionTest <- predict(predictionModel, test, type = "response")
predictedSpam <- rep("nonspam", nrow(test))
# BUG FIX: classify test rows using the TEST-set probabilities. The
# original indexed with predictionModel$fitted, which has one value per
# TRAINING row, so its length does not even match predictedSpam.
predictedSpam[predictionTest > 0.5] <- "spam"
# Confusion table: rows = predicted class, columns = actual class.
table.aux <- table(predictedSpam, test$type)
# False positive rate: nonspam emails flagged as spam.
rate_error_falso_positivo <- table.aux[2, 1] / (sum(table.aux[, 1]))
# False negative rate: spam emails classified as nonspam.
rate_error_falso_negativo <- table.aux[1, 2] / (sum(table.aux[, 2]))