# Kaggle doesn't accept scientific notation (e.g. 1e6) in submissions,
# so disable it globally for this session
options(scipen = 999)
library(gbm)    # gradient boosted models (error loudly if missing, unlike require())
library(Hmisc)  # loaded by the original script; imputation below is now base R
setwd("~/accept_decline")  # NOTE(review): hard-coded path; acceptable for a one-off script

# Load data
# Pre-saved train/test frames; presumably columns f1..f7xx plus id/loss
# (Kaggle "Loan Default Prediction" format) -- TODO confirm against the RDS files
tt1 <- readRDS("train_v2.rds")
tt2 <- readRDS("test_v2.rds")

# Parameters for classification and regression models
vars_clf <- c("f2", "f4",  "f10", "f13", "f222", "f268", "f271", "f273", "f274",
              "f332", "f404", "f527", "f528", "f545", "f653", "f670", "f777")
vars_reg <- c("f527", "f528", "f274", "f515", "f776", "f120", "f83", "f376",
              "f223", "f2", "f338", "f298", "f17", "f652", "f9", "f629", "f52",
              "f597", "f253", "f596", "f130", "f68", "f766", "f84", "f228",
              "f404", "f25", "f332", "f670", "f67", "f14", "f171", "f175", 
              "f273", "f377", "f397", "f477", "f79", "f28", "f95", "f268", 
              "f270", "f229", "f230", "f91", "f121", "f258", "f131", "f90", 
              "f89", "f260", "f598", "f263", "f259", "f124", "f13", "f281",
              "f676", "f367", "f271", "f54", "f15")

# Keep only necessary columns
dat_clf <- tt1[ , vars_clf]
dat_lgd <- tt1[ , vars_reg]
test_clf <- tt2[ , vars_clf]
test_lgd <- tt2[ , vars_reg]

# Impute missing values with the TRAINING-set medians (never test medians,
# because of the added noise). Compute each median once on the raw training
# column and apply it to both frames. This replaces data.frame(sapply(...,
# Hmisc::impute)), which coerced every column through a matrix; filling NAs
# with the median leaves the median unchanged, so the test-side fill values
# are identical to the original's.
med_clf <- vapply(dat_clf, median, numeric(1), na.rm = TRUE)
for (i in names(med_clf)) {
	dat_clf[is.na(dat_clf[, i]), i]   <- med_clf[i]
	test_clf[is.na(test_clf[, i]), i] <- med_clf[i]
}

med_lgd <- vapply(dat_lgd, median, numeric(1), na.rm = TRUE)
for (i in names(med_lgd)) {
	dat_lgd[is.na(dat_lgd[, i]), i]   <- med_lgd[i]
	test_lgd[is.na(test_lgd[, i]), i] <- med_lgd[i]
}

# Derived variables: pairwise spreads of f527/f528/f274, added to every
# frame; the training frames also pick up the loss target (and, for the
# classifier, a binary default indicator). Column order matches the
# original script exactly.
dat_clf$loss <- tt1$loss
dat_clf <- transform(dat_clf,
                     loss_ind = as.integer(loss > 0),
                     fnew     = f528 - f527,
                     fnew2    = f274 - f528,
                     fnew3    = f274 - f527)
test_clf <- transform(test_clf,
                      fnew  = f528 - f527,
                      fnew2 = f274 - f528,
                      fnew3 = f274 - f527)
dat_lgd <- transform(dat_lgd,
                     fnew  = f528 - f527,
                     fnew2 = f274 - f528,
                     fnew3 = f274 - f527)
dat_lgd$loss <- tt1$loss
test_lgd <- transform(test_lgd,
                      fnew  = f528 - f527,
                      fnew2 = f274 - f528,
                      fnew3 = f274 - f527)

# Train the default classifier: predicts whether a loan has any loss
f_class <- loss_ind ~ fnew + f271 + f2 + f332 + f13 + f10 + fnew2 + fnew3 + f222

final_classifier <- gbm(
  formula = f_class,
  data = dat_clf,
  distribution = "bernoulli",  # binary target (loss_ind)
  n.trees = 600,
  interaction.depth = 8,
  shrinkage = 0.1,
  n.minobsinnode = 1,
  bag.fraction = 1,            # no row subsampling
  train.fraction = 0.9
)

# Train loss regression model only on rows the classifier scores as defaults
# (positive logit, i.e. predicted p > 0.5). The original called predict()
# without newdata, which silently relied on gbm's stored training data
# (keep.data) and hid the fact that the scores are row-aligned with dat_lgd;
# pass the training frame explicitly instead.
train_score <- predict(final_classifier, newdata = dat_clf, n.trees = 600)
dat_lgd <- dat_lgd[train_score > 0, ]

# Loss-severity formula: the wide vars_reg feature set (minus f15) plus the
# three derived spread features
f_reg <- loss ~
  f527 + f528 + f274 + f515 + f776 + f120 + f83 + f376 + f223 + f2 +
  f338 + f298 + f17 + f652 + f9 + f629 + f52 + f597 + f253 + f596 +
  f130 + f68 + f766 + f84 + f228 + f404 + f25 + f332 + f670 + f67 +
  f14 + f171 + f175 + f273 + f377 + f397 + f477 + f79 + f28 + f95 +
  f268 + f270 + f229 + f230 + f91 + f121 + f258 + f131 + f90 + f89 +
  f260 + f598 + f263 + f259 + f124 + f13 + f281 + f676 + f367 + f271 +
  f54 + fnew + fnew2 + fnew3

set.seed(2014)  # bag.fraction < 1 samples rows, so fix the RNG for reproducibility
final_regressor <- gbm(
  formula = f_reg,
  data = dat_lgd,
  distribution = "laplace",  # minimizes absolute error
  n.trees = 1000,
  interaction.depth = 14,
  shrinkage = 0.027,
  bag.fraction = 0.5,
  train.fraction = 0.9
)

# Make predictions: score the test set with both models, then zero out the
# loss prediction wherever the classifier says "no default" (logit <= 0)
default_score <- predict(final_classifier, test_clf, n.trees = 600)
predictions   <- predict(final_regressor, test_lgd, n.trees = 1000)
predictions[default_score <= 0] <- 0

# Save submission file; clamp any negative regression outputs to zero,
# since loss cannot be negative
sub1 <- data.frame(id = tt2$id, loss = pmax(predictions, 0))
write.csv(sub1, file = "submission.csv", row.names = FALSE)
