# Packages for model fitting and evaluation.
# NOTE: load order matters here — library(gam) is loaded AFTER mgcv so that
# gam::gam() and gam::s() (used below with the s(x, df) syntax) mask mgcv's
# versions of the same generics.
library(mgcv)
library(gam)
library(tidyverse)
library(varhandle)
library(mice)
library(caret)   # createDataPartition(), confusionMatrix()
library(pROC)    # roc() / plot.roc()
# (the redundant require(caret) was removed — caret is already loaded above,
# and require() should never be used for mandatory dependencies)
#1. Data loading and preprocessing ----
# Read the cleaned data set: rows 1-891 are the labelled training passengers,
# rows 892-1309 are the unlabelled Kaggle test passengers.
full <- read.csv('G:/我的资料/大三下学习资料/广义线性模型/作业/广义线性模型大作业-曹轩豪、樊帅/dataset--titanic/cleaned.csv',header=TRUE)
# True survival outcomes for the test passengers (used only for evaluation).
f_test  <- read.csv('G:/我的资料/大三下学习资料/广义线性模型/作业/广义线性模型大作业-曹轩豪、樊帅/dataset--titanic/test--result.csv',header=TRUE)

# Convert the categorical columns to factors.
# BUG FIX: these variables are nominal, not ordinal, so they must be plain
# (unordered) factors. The original passed `ordered = TRUE`, which makes
# glm()/gam() use orthogonal-polynomial contrasts (.L, .Q, .C terms) and
# renders the coefficient tables uninterpretable. Fitted values are the
# same either way, so downstream predictions are unaffected.
full$Survived <- factor(full$Survived,
                        levels = c(0, 1),
                        labels = c("dead", "survived"))
full$Sex      <- factor(full$Sex)
full$Pclass   <- factor(full$Pclass,
                        levels = c(1, 2, 3),
                        labels = c("1", "2", "3"))
full$Title    <- factor(full$Title)
full$Embarked <- factor(full$Embarked)
full$Deck     <- factor(full$Deck)
full$SibSp    <- factor(full$SibSp)

# Split back into the original Kaggle train / test partitions.
train <- full[1:891, ]
test  <- full[892:1309, ]

#2. Descriptive statistics ----
# Work on a copy of the labelled rows so the relabelling below does not
# disturb the `full`/`train` objects used for modelling.
complete <- full[1:891, ]
# BUG FIX: at this point Survived is already a factor with levels
# "dead"/"survived" (set during preprocessing), so the original
# factor(..., levels = c(1, 0)) matched nothing and turned every value
# into NA, blanking all of the plots below. Relevel by label instead.
complete$Survived <- factor(complete$Survived,
                            levels = c("survived", "dead"),
                            labels = c("Survived", "Died"))

# Survived vs Age
ggplot(complete, aes(x = Survived, y = Age)) +
  geom_point() +
  geom_boxplot(fill = NA, outlier.colour = NA)

# Survived vs Fare (`complete` already holds only rows 1:891)
ggplot(complete, aes(Fare, fill = Survived)) +
  geom_histogram(alpha = 0.5, aes(y = ..density..), position = 'identity')

# Survived vs Pclass
# BUG FIX: the original ended with theme_few(), which lives in the
# un-loaded ggthemes package and therefore errored at run time.
ggplot(complete, aes(x = Pclass, fill = factor(Survived))) +
  geom_bar(stat = 'count', position = 'dodge') +
  labs(x = 'Class')

# Age distribution
ggplot(complete, aes(x = Age)) +
  geom_histogram(alpha = 0.2,
                 fill = "blue",
                 position = 'identity') +
  geom_density(col = 2) +
  labs(title = "Age Distribution") +
  labs(x = "Age")

train <- full[1:891, ]
summary(train)  # inspect the training data

# Hold out an internal validation set from the labelled training data.
# BUG FIX: the original referenced the undefined constant TRAIN_TEST_SPLIT,
# which aborted the script here; define it explicitly.
TRAIN_TEST_SPLIT <- 0.8  # fraction of labelled rows used for fitting
set.seed(42)             # make the random partition reproducible
split <- createDataPartition(y = train$Survived, p = TRAIN_TEST_SPLIT, list = FALSE)

new_train <- train[split, ]
new_test  <- train[-split, ]

#3. Model fitting ----
#3.1 Logistic regression ----
#3.1.1 Variable screening ----

# Fit every candidate predictor on the internal training split.
m_new_features <- glm(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare +
                        Embarked + Title + Deck + TicketType + NumSameCabin +
                        FamilySize,
                      data = new_train, family = binomial)

# Evaluate on the internal validation split.
# BUG FIX: the original thresholded to numeric 0/1 and compared against the
# factor labels ("dead"/"survived"), so the comparison was never TRUE and the
# reported accuracy was always 0. Threshold to the labels instead.
prediction <- predict(m_new_features, new_test, type = "response")
prediction <- ifelse(prediction > 0.5, "survived", "dead")
mean(prediction == new_test$Survived)  # internal accuracy

# Correlations among numeric candidate predictors.
# BUG FIX: SibSp was converted to a factor during preprocessing, so cor()
# on it errored; map it back to its numeric codes first.
cor(data.frame(train[, c("Age", "Fare", "NumSameCabin", "Parch")],
               SibSp = as.numeric(as.character(train$SibSp))))

# Refit with every predictor on all labelled data, for screening.
m_all_train_data <- glm(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare +
                          Embarked + Title + Deck + TicketType + NumSameCabin +
                          FamilySize,
                        data = train, family = binomial)
print(anova(m_all_train_data, test = "Chisq"))  # sequential deviance tests

# Compare the full model against a reduced model on the internal split.
m_unreduced_features <- glm(Survived ~ Pclass + Sex + Age + SibSp + Parch +
                              Fare + Embarked + Title + Deck + TicketType +
                              NumSameCabin + FamilySize,
                            data = new_train, family = binomial)
# Model with the weaker predictors dropped.
m_reduced_features <- glm(Survived ~ Pclass + Sex + Age + SibSp + Title +
                            Embarked + TicketType,
                          data = new_train, family = binomial())
anova(m_unreduced_features, m_reduced_features, test = "Chisq")  # deviance comparison

# Internal accuracy of the reduced model (same label fix as above).
prediction <- predict(m_reduced_features, new_test, type = "response")
prediction <- ifelse(prediction > 0.5, "survived", "dead")
mean(prediction == new_test$Survived)

# Model containing all pairwise interactions among the screened predictors.
m_interacting <- glm(Survived ~ (Pclass + Sex + Age + SibSp + Title + Embarked + FamilySize)^2 
                     , data = train, family = binomial)

summary(m_interacting)

# Repeated stepwise (AIC-based) selection until every remaining term passes
# its test; m_tr is the model arrived at after those iterations.
m_tr <- glm(Survived ~ Pclass +  Title + FamilySize + Pclass:FamilySize, data = train, family = binomial())
tstep <-step(m_tr)
summary(tstep)
drop1(tstep)  # single-term deletion tests for the stepwise result


anova(m_interacting,m_tr,test="Chi")# deviance comparison: full interaction model vs stepwise model

# Choose the prediction threshold: sweep 0.1..0.9 and plot accuracy against
# the true test-set labels.
# BUG FIX: the original compared against the undefined object `result`; the
# true labels live in `f_test`. The predict() call is also loop-invariant,
# so it is hoisted out of the sweep.
pred_prob <- predict(m_tr, test, type = "response")
map(seq(0.1, 0.9, by = 0.1), ~{
  prediction <- ifelse(pred_prob > .x, 1, 0)
  list(threshold = .x, accuracy = mean(prediction == f_test$Survived, na.rm = TRUE))
}) %>%
  bind_rows() %>%
  ggplot(aes(threshold, accuracy)) +
  geom_line()


# Re-establish the full train/test partitions before the final fit.
train <- full[1:891,]
test <- full[892:1309,]
# Final model: the best model found by the stepwise selection above.
m_final <- glm(Survived ~ Pclass + Title + FamilySize + Pclass:FamilySize, data = train, family = binomial())

#3.1.2 Diagnostics
plot(m_final) # residual, Q-Q, scale-location and leverage plots

summary(m_final)$deviance / summary(m_final)$df.residual # overdispersion ratio; a value above 1.5 suggests overdispersion

#3.1.3 Prediction ----
# Keep the fitted probabilities separately: they are needed for the ROC
# curve, while the thresholded values feed the confusion matrix / write-out.
pre_prob <- predict(m_final, test, type = "response")
pre <- ifelse(pre_prob > 0.5, 1, 0)

# ROC curve.
# BUG FIXES: (1) the original referenced the undefined object `result` —
# the true labels live in `f_test`; (2) the ROC must be computed from the
# continuous probabilities, not the thresholded 0/1 predictions (the
# latter collapses the curve to a single operating point).
roc1 <- roc(f_test$Survived, pre_prob)
plot(roc1, print.auc = TRUE, auc.polygon = TRUE, grid = c(0.1, 0.2),
     grid.col = c("green", "red"), max.auc.polygon = TRUE, print.thres = TRUE)

# Confusion matrix on the true test labels.
pred.class <- as.integer(pre)
cft <- table(pred.class, f_test$Survived)
confusionMatrix(cft, positive = "1")

#3.1.4 Confidence interval estimation ----
CI <- confint(m_final)  # profile-likelihood intervals
parameter <- cbind(summary(m_final)$coef[,'Estimate'], CI)
parameter  # point estimates alongside their confidence intervals


#3.2 Generalized additive model (GAM)
#3.2.1 Model fitting
# NOTE: gam() and s() here come from the `gam` package (loaded after mgcv,
# so it masks mgcv's versions); s(FamilySize, 6) is gam-package syntax for
# a smoothing spline with a target of 6 degrees of freedom.
model3<-gam(Survived ~ Pclass+ Title+ SibSp+s(FamilySize,6)
            ,data=train,family=binomial)
# Same model with FamilySize entering linearly, kept for comparison below.
model4<-gam(Survived ~ Pclass+ Title+ SibSp+FamilySize
            ,data=train,family=binomial)
summary(model3)
anova(model3)

#3.2.2 Plots -- the effect of each term on the response
par(mfrow=c(1,3))
plot(model3,se=TRUE,resid=T) 
plot(model3)
length(train$FamilySize[which(train$FamilySize>8)])  # number of passengers with FamilySize > 8
# NOTE(review): gam.check() is exported by mgcv, not the gam package; it may
# not accept a gam-package fit object — verify this call actually runs.
gam.check(model3)

#3.2.3 Predict on the test set ----
# Keep the fitted probabilities: they are needed for the ROC curve below.
prob <- predict(model3, test, type = "response")
prediction <- ifelse(prob > 0.5, 1, 0)

# Accuracy against the true test labels
mean(prediction == f_test$Survived)
# Confusion matrix
table(as.integer(prediction), f_test$Survived)

#3.2.4 Model comparison and diagnostics ----
AIC(model4, model3)   # spline vs linear treatment of FamilySize
anova(model4, model3, test = 'Chisq')
residuals(model3, type = 'pearson')
# BUG FIX: the ROC curve must be built from the continuous probabilities;
# feeding the thresholded 0/1 predictions collapses it to a single point.
roc2 <- roc(f_test$Survived, prob)
plot(roc2, print.auc = TRUE, auc.polygon = TRUE, grid = c(0.1, 0.2),
     grid.col = c("green", "red"), max.auc.polygon = TRUE, print.thres = TRUE)


# Refit a larger interaction model (NOTE: this deliberately overwrites the
# stepwise m_final fitted above) and write both prediction sets to disk.
m_final <- glm(Survived ~ Pclass + Sex + Age + SibSp + Title + Embarked +
                 FamilySize + Pclass:Sex + Pclass:Age + Sex:SibSp +
                 Sex:Embarked + Age:SibSp + Pclass:FamilySize +
                 Embarked:FamilySize,
               data = train, family = binomial())
prediction <- predict(m_final, test, type = "response")
prediction <- ifelse(prediction > 0.5, 1, 0)

# BUG FIX: with row names present, col.names = TRUE writes a header row that
# is shifted one cell to the left; per ?write.table, col.names = NA adds the
# blank corner entry so the CSV header lines up with the data columns.
write.table(prediction,file="G:/我的资料/大三下学习资料/广义线性模型/作业/广义线性模型大作业-曹轩豪、樊帅/dataset--titanic/prediction3.csv",row.names=TRUE,col.names=NA,sep=",")
write.table(pre,file="G:/我的资料/大三下学习资料/广义线性模型/作业/广义线性模型大作业-曹轩豪、樊帅/dataset--titanic/prediction2.csv",row.names=TRUE,col.names=NA,sep=",")

#4. XGBoost ----
library(Matrix)
library(xgboost)

# BUG FIX: the original built `traindata` as list(data = train, label = test)
# (the "labels" were the test DATA FRAME), then referenced the undefined
# objects `traindata4` and `dtest`, so none of this ran. xgb.DMatrix also
# requires a numeric matrix, not a raw data frame, so the factor columns are
# one-hot encoded first.
#
# One-hot encode the predictors for all 1309 passengers at once so the train
# and test matrices are guaranteed to share identical columns.
# NOTE(review): assumes cleaned.csv contains only model-ready predictor
# columns with no missing values (it was imputed upstream) — confirm,
# otherwise sparse.model.matrix would silently drop NA rows.
x_all   <- sparse.model.matrix(~ . - 1, data = subset(full, select = -Survived))
x_train <- x_all[1:891, ]
x_test  <- x_all[892:1309, ]
y_train <- as.integer(train$Survived == "survived")  # 1 = survived, 0 = dead

dtrain <- xgb.DMatrix(data = x_train, label = y_train)
dtest  <- xgb.DMatrix(data = x_test)

xgb <- xgboost(data = dtrain, max_depth = 5, eta = 0.5,
               objective = 'binary:logistic', nround = 25)
# Hard 0/1 class predictions on the test passengers.
pre_xgb <- round(predict(xgb, newdata = dtest))















