# (TODO) Model the Tantrev variable and interactions better
# (TODO) The cut point of 0.09 on the Tantrev variable is too low, 
# (TODO) the first 4 buckets have no losses in them
# (TODO) Try more Tantrev interactions, other interactions
# (TODO) Try directly using L1 logistic regression
# (TODO) Try to capture a few more losses by modeling losses with Tantrev == 0 
# (TODO) Is f271 another golden feature?


# Functions

ImputeMean <- function(x) {
  # Replace NAs in x with the mean of the non-NA values.
  #
  # Args:
  #   x: numeric vector, possibly containing NAs.
  #
  # Returns:
  #   x with every NA replaced by mean(x, na.rm = TRUE).  If x is entirely
  #   NA the "mean" is NaN, so the NAs become NaN (same as the old
  #   mean(na.omit(x)) behavior).
  x[is.na(x)] <- mean(x, na.rm = TRUE)
  x
}


Quantilize <- function(x, n = 20, change.levels = FALSE) {
  # Cut x into a factor with breaks at the n quantiles of x.
  #
  # Args:
  #   x: numeric vector to discretize.
  #   n: number of quantile buckets requested.  Duplicate quantile values
  #      are collapsed, so the result may have fewer than n levels.
  #   change.levels: if TRUE, relabel the levels sequentially "1", "2", ...
  #     instead of the default "(a,b]" interval labels.
  #
  # Returns:
  #   A factor the same length as x.  If x is (near-)constant so that every
  #   quantile coincides, a single-level factor of 1s is returned.
  qntls <- unique(quantile(x, probs = seq(0, 1, 1 / n)))
  if (length(qntls) > 1) {
    if (change.levels) {
      cut(x, breaks = qntls, labels = seq_len(length(qntls) - 1),
          include.lowest = TRUE)
    } else {
      cut(x, breaks = qntls, include.lowest = TRUE)
    }
  } else {
    # Degenerate case: all quantiles identical, nothing to cut on.
    as.factor(rep(1, length(x)))
  }
}


Quantilize2 <- function(x, y, n = 20, change.levels = FALSE) {
  # Cut y into buckets defined by the n quantiles of x, so that y can be
  # coded on the same scale as a previously quantilized x (e.g. coding the
  # test set with the training set's breaks).
  #
  # Args:
  #   x: numeric vector that supplies the quantile break points.
  #   y: numeric vector to discretize with those breaks.  Values of y
  #      outside the range of x's quantiles become NA.
  #   n: number of quantile buckets requested; duplicate quantiles collapse.
  #   change.levels: if TRUE, relabel levels sequentially "1", "2", ...
  #
  # Returns:
  #   A factor the same length as y.  If x is (near-)constant so that every
  #   quantile coincides, a single-level factor of 1s is returned.
  qntls <- unique(quantile(x, probs = seq(0, 1, 1 / n)))
  if (length(qntls) > 1) {
    if (change.levels) {
      cut(y, breaks = qntls, labels = seq_len(length(qntls) - 1),
          include.lowest = TRUE)
    } else {
      cut(y, breaks = qntls, include.lowest = TRUE)
    }
  } else {
    # Degenerate case: all quantiles identical, nothing to cut on.
    as.factor(rep(1, length(y)))
  }
}


# Read the training set and impute missing values column-wise
# NOTE(review): setwd() in a script is fragile -- consider running from the
# project directory or using full paths instead.
setwd("~/accept_decline")
df <- readRDS("train.rds")
df <- data.frame(lapply(df, ImputeMean))
# Add an indicator variable for loss > 0 (binary target for the
# accept/decline stage)
df <- data.frame(df, loss_ind=as.integer(df$loss > 0))

# Compute the Tantrev variable: the predicted probability of any loss from
# a logistic regression on f274 and f528 only
m <- glm(formula = loss_ind ~ f274 + f528, family = binomial(), data = df)
pred.tantrev <- predict(m, type="response")

# Plot the loss frequency versus Tantrev (mean loss_ind per percentile
# bucket of the score)
a <- Quantilize(pred.tantrev, n = 100)
plot(tapply(df$loss_ind, a, mean))
# Fraction of all losses retained by the 0.09 Tantrev cut
sum(df$loss_ind[pred.tantrev > .09])/sum(df$loss_ind) # (TODO) 0.09 is too low

# Subset and quantilize the training set: keep only rows scoring above the
# 0.09 Tantrev cut point
df2 <- df[pred.tantrev > .09, ]
pred.tantrev2 <- pred.tantrev[pred.tantrev > .09]
# 5 Tantrev buckets, used below as a factor in the loss model
a <- Quantilize(pred.tantrev2, 5)
# Quantilize every feature column into 20 buckets with sequential labels.
# The slice 2:(ncol - 2) assumes column 1 is id and the last two columns
# are loss and loss_ind -- TODO confirm against train.rds.
df_quantile <- lapply(df2[, 2:(ncol(df2) - 2)], Quantilize, n = 20, 
                      change.levels = T)
df_quantile <- data.frame(df_quantile, tantrev = a, loss_ind = df2$loss_ind, 
                          loss = df2$loss)

# Create the loss model: quasibinomial GLM of loss/100 (loss scaled into
# [0, 1]) on the quantilized features, with a tantrev * f2 interaction
# Reducing number of variables by cross-validation doesn't seem to help
# (TODO) Structure of the tantrev variable should be reviewed, looks too coarse
m_fine <- glm(loss/100 ~ tantrev * f2 + f13 + f404 + f543 + f588 + f597 +
			                   f611 + f615 + f670 + f674 + f681 + f683 + f712 +
                         f715 + f726 + f727 + f772,
	            data = df_quantile, family = quasibinomial())
pred <- predict(m_fine, type="response")

# Plot a model lift chart: predicted loss quantiles (line) vs the observed
# mean loss per predicted-loss bucket (points)
plot(quantile(pred * 100, seq(0, 1, .05)), type="l")
b <- Quantilize(pred * 100, 20)
points(tapply(df_quantile$loss, b, mean))

# We need the medians for use later with the test set (these are the
# hard-coded bucket values applied to pred.test further down)
tapply(df_quantile$loss, b, median)

# Compute MAE on the training data.  Each row above the Tantrev cut gets
# the median loss of its prediction bucket (b, from the lift chart above);
# all other rows get a predicted loss of 0.
midpoints <- tapply(df_quantile$loss, b, median)
df_ans_trn <- data.frame(id = df2$id, midpoints[as.integer(b)])
# Left join onto the full training id list; rows below the cut come back NA
df_ans_trn <- merge(data.frame(id = df$id), df_ans_trn, by = "id", all.x = TRUE)
names(df_ans_trn) <- c("id", "pred_loss")
df_ans_trn$pred_loss[is.na(df_ans_trn$pred_loss)] <- 0
# All-zeros benchmark MAE
mean(df$loss)
# Current model MAE
mean(abs(df_ans_trn$pred_loss - df$loss))
# Improvement over the all-zeros benchmark
mean(df$loss) - mean(abs(df_ans_trn$pred_loss - df$loss))


# Now need to apply the model to test, which means test variables 
# must be Quantilized.  Only the model features plus the Tantrev inputs
# (f274, f528) and id are kept.
df_test <- readRDS("test.rds")
keep.columns <- c("f2", "f13", "f404", "f543", "f588", "f597", "f611", "f615",
                  "f670", "f674", "f681", "f683", "f712", "f715", "f726", 
                  "f727", "f772", "f274", "f528")
# NOTE(review): this imputes with TEST-set column means rather than the
# train-set means -- possible train/test mismatch; confirm intended.
test.imputed <- lapply(df_test[, keep.columns], ImputeMean)
df_test <- data.frame(id=df_test$id, test.imputed)

# Subset the test set on the Tantrev variable, using the same 0.09 cut
# point as training
pred_test_tantrev <- predict(m, newdata=df_test, type="response")
df_test2 <- df_test[pred_test_tantrev > .09, ]
pred_test_tantrev2 <- pred_test_tantrev[pred_test_tantrev > .09]
# Bucket the test Tantrev scores with the TRAIN quantile breaks so the
# tantrev factor coding matches the model's
b <- Quantilize2(pred.tantrev2, pred_test_tantrev2, 5, F)

# Quantilize the relevant test set variables.  Each feature is cut with
# the TRAIN-set (df2) quantile breaks so train and test share the same
# sequential factor coding the model was fit on.
feature.cols <- c("f2", "f13", "f404", "f543", "f588", "f597", "f611",
                  "f615", "f670", "f674", "f681", "f683", "f712", "f715",
                  "f726", "f727", "f772")
quantized <- lapply(feature.cols, function(col) {
  Quantilize2(df2[[col]], df_test2[[col]], change.levels = TRUE)
})
names(quantized) <- feature.cols
df_test_quantile <- data.frame(id = df_test2$id, quantized, tantrev = b)
# Predicted loss on the original 0-100 scale (model was fit on loss/100)
pred.test <- 100 * predict(m_fine, type = "response", newdata = df_test_quantile)
test_loss <- rep(0, nrow(df_test2))



# Medians from the train set (output of the tapply above):
# (0.0122,0.89]         (0.89,1.63]          (1.63,2.2]          (2.2,2.74]         (2.74,3.29] 
# 0                   1                   1                   2                   2 
# (3.29,3.88]         (3.88,4.55]         (4.55,5.26]         (5.26,6.07]         (6.07,6.97] 
# 3                   3                   3                   4                   5 
# (6.97,8.08]          (8.08,9.8]          (9.8,12.1]         (12.1,16.1]         (16.1,56.2] 
# 6                   7                   8                   9                  16 


# Calculate predicted loss (bucket median) on the test set.  Adjacent
# buckets sharing a median are merged, giving the break points below.
# findInterval with left.open = TRUE reproduces the (low, high] intervals
# of the original if-chain; the +1 shifts into the lookup vector, so
# index 1 ("at or below 0.89") maps to 0 and anything above 16.1 maps
# to the open-ended tail value 16.
median.breaks <- c(0.89, 2.20, 3.29, 5.26, 6.07, 6.97, 8.08, 9.80, 12.10, 16.10)
median.values <- c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16)
test_loss <- median.values[findInterval(pred.test, median.breaks,
                                        left.open = TRUE) + 1]

# Merge with the rest of the test set (prediction = 0).
# Left join onto the full test id list; rows below the Tantrev cut come
# back NA and are set to a predicted loss of 0.
df_ans <- data.frame(id=df_test2$id, test_loss)
df_ans2 <- merge(data.frame(id=df_test$id), df_ans, by="id", all.x=T)
names(df_ans2) <- c("id","loss")
df_ans2$loss[is.na(df_ans2$loss)] <- 0

# Save the submission
write.csv(df_ans2, "submission.csv", row.names=F, quote=F)
