source("service/main.r")

# Feature Engineering
# Feature engineering, round two.
# This is the highlight of this walkthrough: deriving the Child and Mother features.
# A child will simply be someone under 18 years of age, and a mother is a passenger who
# 1) is female
# 2) is over 18
# 3) has more than 0 children (no kidding!)
# 4) does not have the title "Miss"

# Relationship between age & survival, faceted by sex (training rows only)
ggplot(data = full[1:891, ], mapping = aes(x = Age, fill = factor(Survived))) +
  geom_histogram() +
  facet_grid(. ~ Sex) +
  theme_few()

# Flag passengers under 18 as children, everyone else as adults.
# (A missing Age yields NA here, exactly as the two-step subassignment did.)
full$Child <- ifelse(full$Age < 18, "Child", "Adult")

# A mother: a female over 18 with at least one child/parent aboard (Parch > 0)
# and not titled "Miss". Build the mask first, then label the matching rows.
is_mother <- full$Sex == "female" &
  full$Age > 18 &
  full$Parch > 0 &
  full$Title != "Miss"
full$Mother <- "Not Mother"
full$Mother[is_mother] <- "Mother"

# Cross-tabulate child vs mother status as proportions of all passengers
prop.table(table(full$Child, full$Mother))
# Encode both engineered features as factors for the model
full$Child <- factor(full$Child)
full$Mother <- factor(full$Mother)
# Check the missing-data pattern before modelling (mice::md.pattern)
md.pattern(full)

# Prediction: split back into train & test sets.
# Rows 1-891 are the labelled training data; rows 892-1309 are the test data.
train <- full[seq_len(891), ]
test <- full[892:1309, ]

# Build the survival classifier with a random forest

set.seed(199)  # fix the RNG so the forest is reproducible
rf_model <- randomForest(factor(Survived)~Pclass + Sex + Age + SibSp + Parch + 
                           Fare + Embarked + Title + 
                           FsizeD + Child + Mother,
                         data=train,# fit on the labelled training rows only
                         ntree=2000
                         )
# Show model error as trees are added; legend columns come from the
# model's err.rate matrix (OOB plus per-class error)
par(mfrow=c(1,1))
plot(rf_model, ylim=c(0,0.36))
legend("topright",colnames(rf_model$err.rate),col=1:3,fill=1:3)

# Variable importance ----
# Store the importance matrix under a name that does not mask
# randomForest::importance() — the original `importance <- importance(rf_model)`
# shadowed the function it had just called.
imp_matrix <- importance(rf_model)
varImportance <- data.frame(Variables = row.names(imp_matrix),
                            Importance = round(imp_matrix[, "MeanDecreaseGini"], 2))  # keep 2 decimals
# Rank variables from most to least important ("#1" = most important)
rankImportance <- varImportance %>%
  mutate(Rank = paste0("#", dense_rank(desc(Importance))))
# Visualise the relative importance of each predictor with ggplot2
ggplot(rankImportance,
       aes(x = reorder(Variables, Importance),
           y = Importance,
           fill = Importance)) +
  geom_col() +
  geom_text(aes(x = Variables, y = 0.5, label = Rank),
            hjust = 0,
            vjust = 0.55,
            size = 4,
            colour = 'red') +
  labs(x = "Variables") +
  coord_flip() +
  theme_few()
# Predict on the test set and write the submission file.
prediction <- predict(rf_model, test)
# Kaggle's Titanic submission format requires the exact header
# "PassengerId,Survived" — the original column name "PassengerID" is
# rejected by the scorer.
solution <- data.frame(PassengerId = test$PassengerId, Survived = prediction)
write.csv(solution, file = "output/rf_mod_solution2000.csv", row.names = FALSE)


# The random forest score was not ideal — try a conditional inference
# forest instead (an earlier 2000-tree attempt scored lower).
library(party)
set.seed(199)  # same seed as the random forest, for comparability
cforest_fit <- cforest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + 
                         Fare + Embarked + Title + 
                         FsizeD + Child + Mother,
                       data = train,
                       controls = cforest_unbiased(ntree = 2000, mtry = 3))
# NOTE(review): OOB = TRUE should only matter when predicting on the
# training data — confirm it is a no-op for new data like `test`.
Prediction_c <- predict(cforest_fit, test, OOB = TRUE, type = "response")
# Kaggle requires the exact header "PassengerId,Survived"
# (the original wrote "PassengerID", which the scorer rejects).
solution_c <- data.frame(PassengerId = test$PassengerId, Survived = Prediction_c)
write.csv(solution_c, file = "output/rf_mod_solution2000_c.csv", row.names = FALSE)





