# author: zuowang  time:2021/10/9
"""
PassengerId => 乘客ID
Pclass => 乘客等级(1/2/3等舱位)
Name => 乘客姓名
Sex => 性别
Age => 年龄
SibSp => 兄弟姐妹/配偶个数
Parch => 父母与小孩个数
Ticket => 船票信息
Fare => 票价
Cabin => 客舱
Embarked => 登船港口
"""
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import OneHotEncoder


import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
import seaborn
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline,make_pipeline
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.feature_selection import SelectKBest
from sklearn import svm

import warnings
# Load the Kaggle Titanic train/test splits.
# NOTE(review): hard-coded absolute Windows paths — consider making these
# configurable (pathlib/argparse) so the script runs on other machines.
input_df = pd.read_csv(r'C:\Users\wodediannao\Desktop\ipython\titanic\train.csv', header=0)
submit_df = pd.read_csv(r'C:\Users\wodediannao\Desktop\ipython\titanic\test.csv', header=0)

# Concatenate train + test so all feature engineering below is applied
# consistently to both splits.  ignore_index=True rebuilds a clean
# 0..n-1 RangeIndex directly, replacing the original
# reset_index(inplace=True) + drop('index') two-step.
df = pd.concat([input_df, submit_df], ignore_index=True)

# Missing values per split (from .isnull().sum()):
#   train: Age 177, Cabin 687, Embarked 2
#   test:  Age 86, Fare 1, Cabin 327

# Labels exist only for the training rows; test rows have NaN Survived.
target_train = input_df['Survived']

## Handle missing / low-signal columns
# Cabin (mostly missing), Ticket (free-form text) and PassengerId (a
# meaningless row id) are dropped outright.
df = df.drop(['Cabin', 'Ticket', 'PassengerId'], axis=1)
#print(df)

#print(df.info())

#  Age => 年龄
# # version1
import matplotlib.pyplot as plt
# average_age= df["Age"].mean()
# df['Age'][df.Age.isnull()] = average_age
# # # 可视化
# fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,5))
# #figsize:设定figure尺寸。系统默认命令是rcParams["figure.fig.size"] = [6.4, 4.8]，即figure长宽为6.4 * 4.8；
#
# axis1.set_title('Original Age values')
# axis2.set_title('New Age values')
#
# df['Age'].plot(kind='hist', bins=70, ax=axis1)
# df['Age'].plot(kind='hist', bins=70, ax=axis2)
# plt.show()

import numpy as np
# version2
# 试试随机选取平均值加减标准差范围的数来改进，使数据更接近真实情况。
# Fill missing ages with random draws from [mean - std, mean + std] so the
# imputed values roughly follow the observed spread instead of spiking the
# histogram at a single mean value.
average_age   = df["Age"].mean()
std_age       = df["Age"].std()
count_nan_age = df["Age"].isnull().sum()
rand = np.random.randint(average_age - std_age, average_age +
                         std_age, size = count_nan_age)
# BUG FIX: snapshot the pre-fill distribution *before* imputing, and take a
# real copy — the original assigned `df1 = df['Age']` (a view) *after*
# filling, so the "Original" and "New" histograms plotted identical data.
original_age = df['Age'].copy()
# BUG FIX: use .loc instead of chained indexing (df['Age'][mask] = ...),
# which raises SettingWithCopyWarning and can silently fail to write.
df.loc[df['Age'].isnull(), 'Age'] = rand
# Visualize the before/after age distributions.
fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,5))
axis1.set_title('Original Age values')
axis2.set_title('New Age values')
original_age.plot(kind='hist', bins=70, ax=axis1)
df['Age'].plot(kind='hist', bins=70, ax=axis2)
plt.show()
print ( "填充缺失值前：", count_nan_age )
print ( "填充缺失值后：", df["Age"].isnull ().sum () )

# 接着，我们需要从年龄中提取一个特征出来--孩子
# def is_child(age):
#     if age < 16:
#         return 1
#     else:
#         return 0
# # 可视化
import seaborn

# df['Child'] = df['Age'].apply ( is_child )  # 小于16岁的认为是孩子
# fig, (axis1,axis2) = plt.subplots(1,2,figsize=(10,5))
# seaborn.countplot(x='Child', data=df, ax=axis1)
# child_survive = df[["Child", "Survived"]].groupby(['Child'],as_index=False).mean()
#
# seaborn.barplot(x='Child', y='Survived', data=child_survive, ax=axis2)
# plt.show()

# Embarked => 登船港口:
# Embarked（港口编号）只有2个缺失项，直接用最常见的值填充它，然后用图表看看其与Survived的关系：
# count_nan_age = df["Embarked"].isnull ().sum ()
# df.Embarked[df.Embarked.isnull ()] = df.Embarked.dropna ().mode ().values
# input_df_tmp = df[:input_df.shape[0]]  # 取出input_df部分，因为只有他们才有Survived特征。
# (s, c, q) = df['Embarked'].value_counts ()
# embark_percentage = pd.DataFrame ( {
#     'Embarked': np.array ( ['S', 'C', 'Q'] ),
#     'percentage': np.array ( [float ( i ) / df['Embarked'].count () for i in (s, c, q)] )} )
# # 可视化
# fig, (axis1, axis2, axis3) = plt.subplots ( 1, 3, figsize=(15, 5) )
# seaborn.barplot ( x='Embarked', y='percentage', data=embark_percentage, ax=axis1 )
# seaborn.countplot ( x='Survived', hue="Embarked", data=input_df_tmp, order=[1, 0], ax=axis2 )
# embark_perc = input_df_tmp[["Embarked", "Survived"]].groupby ( ['Embarked'], as_index=False ).mean ()
# seaborn.barplot ( x='Embarked', y='Survived', data=embark_perc, order=['S', 'C', 'Q'], ax=axis3 )
# plt.show ()
# print ( "填充缺失值前：", count_nan_age )
# print ( "填充缺失值后：", df["Embarked"].isnull ().sum () )
#
#

# 变量转换
# Pclass => 乘客等级(1/2/3等舱位) ：
# 可视化
# input_df_tmp = df[:input_df.shape[0]]
# seaborn.factorplot('Pclass', 'Survived', order=[1,2,3], data=input_df_tmp, size=6)
# plt.show()
# Pclass（客舱等级）考虑做Dummy Variables处理，使其生成更多的特征。
# One-hot encode Pclass into Class_1/Class_2/Class_3 indicator columns.
# Class_3 is implied by the other two, so it is dropped along with the
# raw Pclass column.
class_indicators = pd.get_dummies(df['Pclass'], prefix='Class')
class_indicators = class_indicators.drop(['Class_3'], axis=1)
df = df.drop(['Pclass'], axis=1).join(class_indicators)

# Sex（性别）:
# 可视化
# fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,5))
# seaborn.countplot(x='Sex', data=df, ax=axis1)
# women_survive = df[["Sex", "Survived"]].groupby(['Sex'],as_index=False).mean()
# seaborn.barplot(x='Sex', y='Survived', data=women_survive, ax=axis2)
# plt.show()
# Encode Sex numerically: male -> 1, female -> 0.
# BUG FIX: .map() replaces the original chained assignments
# (df['Sex'][mask] = ...), which trigger SettingWithCopyWarning and can
# silently fail to write on newer pandas versions.
df['Sex'] = df['Sex'].map({'male': 1, 'female': 0}).astype(int)

# Name / Parch (parents+children) / SibSp (siblings+spouses):
# derive a binary "has family aboard" feature — relatives on board may
# help each other during evacuation, so survival odds could be higher.
# Equivalent to the original: (Parch + SibSp) > 1 -> 1, == 0 -> 0,
# == 1 left as 1 — i.e. simply "any relative aboard".
df['WithFamily'] = (df["Parch"] + df["SibSp"] > 0).astype(int)
# 可视化
# input_df_tmp = df[:input_df.shape[0]]
# fig, (axis1,axis2) = plt.subplots(1,2,sharex=True,figsize=(10,5))
# seaborn.countplot(x='WithFamily', data=df, order=[1,0], ax=axis1)
# family_perc = input_df_tmp[["WithFamily", "Survived"]].groupby(['WithFamily'],as_index=False).mean()
# seaborn.barplot(x='WithFamily', y='Survived', data=family_perc, order=[1,0], ax=axis2)
# axis1.set_xticklabels(["With Family","Alone"], rotation=0)
# plt.show()
# '''
# 绘图
# input_df_tmp = df[:input_df.shape[0]]
# fig, (axis1, axis2) = plt.subplots ( 1, 2, sharex=True, figsize=(10, 5) )
# seaborn.countplot ( x='WithFamily', data=df, order=[1, 0], ax=axis1 )
# family_perc = input_df_tmp[["WithFamily", "Survived"]].groupby ( ['WithFamily'], as_index=False ).mean ()
# seaborn.barplot ( x='WithFamily', y='Survived', data=family_perc, order=[1, 0], ax=axis2 )
# axis1.set_xticklabels ( ["With Family", "Alone"], rotation=0 )
# plt.show ()
# '''

# Dummy-encode the embarkation port (columns C/Q/S).
# NOTE(review): Embarked still contains 2 NaNs at this point (the
# mode-fill above is commented out); get_dummies emits all-zero rows
# for them — confirm this is intended.
embark_dummies = pd.get_dummies(df['Embarked'])
df = df.join(embark_dummies)
df.drop(['Embarked'], axis=1, inplace=True)

# Surname: the word following the honorific in Name, integer-encoded.
# FIX: raw strings for the regex (the original "\.\s(\w*)" relies on
# invalid escape sequences), and the pattern is compiled once instead of
# once per row.
_surname_re = re.compile(
    r"(Mr|Mrs|Miss|Master|Don|Rev|Dr|Mme|Ms|Major|Lady|Sir|Mlle|Col|Capt|the Countess|Jonkheer|Dona)\.\s(\w*)")
df['Surname'] = df['Name'].map(lambda x: _surname_re.findall(x)[0][1])
df['Surname'] = pd.factorize(df['Surname'])[0]

# Title: the honorific between the comma and the period in Name,
# with rare variants collapsed into canonical groups.
_title_re = re.compile(r", (.*?)\.")
df['Title'] = df['Name'].map(lambda x: _title_re.findall(x)[0])
# BUG FIX: a single .replace() mapping instead of five chained
# assignments (df['Title'][mask] = ...) that raise SettingWithCopyWarning.
df['Title'] = df['Title'].replace({
    'Jonkheer': 'Master',
    'Ms': 'Miss', 'Mlle': 'Miss',
    'Mme': 'Mrs',
    'Capt': 'Sir', 'Don': 'Sir', 'Major': 'Sir', 'Col': 'Sir',
    'Dona': 'Lady', 'the Countess': 'Lady',
})
df['Title_id'] = pd.factorize(df['Title'])[0] + 1
df = df.drop(['Title'], axis=1)
df.drop(['Name'], axis=1, inplace=True)

# Family size as a binary flag: 1 for "big" families (3+ people
# including the passenger), else 0.
# BUG FIX: computed directly as int, replacing the original four-step
# round-trip that wrote the strings 'small'/'big' into an int column
# before mapping them back to 0/1.
df['FamilySize'] = (df["Parch"] + df["SibSp"] + 1 >= 3).astype(int)


# Assemble the model matrices.  Survived (all-NaN for the test rows)
# must not leak into the feature matrix.
df.drop(['Survived'], axis=1, inplace=True)
train_data = df[:input_df.shape[0]].values
train_target = target_train
test_data = df[input_df.shape[0]:].values

# 1.0 Min-max scale all features into [0, 1].
# BUG FIX: fit the scaler on the training rows only and reuse it for the
# test rows.  The original fit a *separate* MinMaxScaler on the test set,
# so the same raw value could map to different scaled values in train vs
# test, systematically skewing every prediction.
scaler = MinMaxScaler().fit(train_data)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)

#train_data=.fillna(0)
# # 2.0 标准差标准化数据
# # 没有离差标准化数据准确率高
# train_data=StandardScaler().fit_transform(train_data)
# test_data=StandardScaler().fit_transform( test_data )
# # 3.0 对特征进行二值化处理
# # 没有离差标准化数据准确率高
# train_data=Binarizer().fit_transform(train_data)
# test_data=Binarizer().fit_transform(test_data)
# # 4.0 对定性特征进行独热编码处理
# # 没有离差标准化数据准确率高
# train_data=OneHotEncoder().fit_transform(train_data)
# test_data=OneHotEncoder().fit_transform(test_data)
# PCA降维
# pca_model=PCA(n_components=15).fit(train_data)
# train_data=pca_model.transform(train_data)
# pca_model=PCA(n_components=15).fit(test_data)
# test_data=pca_model.transform(test_data)

# 可视化研究不同特征的影响
# import matplotlib.pyplot as plt
# features_list = df.columns.values
# # Fit a random forest with (mostly) default parameters to determine feature importance
# forest = RandomForestClassifier(oob_score=True, n_estimators=10000)
# forest.fit( train_data, train_target )
# feature_importance = forest.feature_importances_
# # make importances relative to max importance
# feature_importance = 100.0 * (feature_importance / feature_importance.max())
# # Get the indexes of all features over the importance threshold
# important_idx = np.where(feature_importance)[0]
# # Get the sorted indexes of important features
# sorted_idx = np.argsort(feature_importance[important_idx])[::-1]
# # print ("\nFeatures sorted by importance (DESC):\n", features_list[sorted_idx])
# # Adapted from http://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_regression.html
# pos = np.arange(sorted_idx.shape[0]) + .5
# plt.subplot(1, 2, 2)
# plt.barh(pos, feature_importance[important_idx][sorted_idx[::-1]], align='center')
# plt.yticks(pos, features_list[sorted_idx[::-1]])
# plt.xlabel('Relative Importance')
# plt.title('Variable Importance')
# plt.savefig('研究不同数据对结果的影响')
# plt.show()
#
# print(features_list[sorted_idx[::-1]])
# '''
# df.drop(['Q'],axis=1,inplace=True)
# '''
# pipe=Pipeline([('select',SelectKBest(k=15)),
#                ('classify', RandomForestClassifier(random_state = 10, max_features = 'sqrt'))])
# param_test = {'classify__n_estimators':list(range(20,50,2)),
#               'classify__max_depth':list(range(3,60,3))}
# gsearch = GridSearchCV(estimator = pipe, param_grid = param_test, scoring='roc_auc', cv=10)
# gsearch.fit(train_data,train_target)
# print(gsearch.best_params_, gsearch.best_score_)
# '''
#
# df.drop ( ['Q'], axis=1, inplace=True )
# #
# # 随机森林
# '''预测结果0.81'''
# from sklearn.ensemble import RandomForestClassifier
#
# # random_forest = RandomForestClassifier ( random_state=1, n_estimators=36,
# #                                          min_samples_split=4, min_samples_leaf=2, max_depth=9 )
# random_forest = RandomForestClassifier ( warm_start=True,
#                                          random_state=1,
#                                          n_estimators=26,
#                                          max_depth=6,
#                                          max_features='sqrt', min_samples_split=4, min_samples_leaf=2 )
# random_forest.fit ( train_data, train_target )
# Y_pred = random_forest.predict ( test_data )
# print ( random_forest.score ( train_data, train_target ) )
# submission = pd.DataFrame ( {
#     "PassengerId": submit_df["PassengerId"],
#     "Survived": Y_pred.astype ( int )
# } )
# submission.to_csv ( 'result11.csv', index=False )


# GBDT — gradient boosted decision trees.  Kaggle leaderboard score ≈ 0.73.
# NOTE: GradientBoostingClassifier is already imported at the top of the
# file (sklearn.ensemble); the redundant mid-script import was removed.
GBDT = GradientBoostingClassifier(n_estimators=1000)
GBDT.fit(train_data, train_target)

Y_pred = GBDT.predict(test_data)
# NOTE(review): this prints *training* accuracy — with 1000 estimators it
# will be very optimistic; use cross-validation for an honest estimate.
print(GBDT.score(train_data, train_target))
submission = pd.DataFrame({
    "PassengerId": submit_df["PassengerId"],
    "Survived": Y_pred.astype(int),
})
submission.to_csv('result3.csv', index=False)
#
#
# SVM model.  Kaggle leaderboard score ≈ 0.77.
# BUG FIX: the original did `svm = svm.SVC()`, rebinding the *module* name
# `svm` (from `from sklearn import svm`) to a fitted estimator, so any
# later use of the sklearn.svm module in this file would break.  The model
# now gets its own name.
svc_model = svm.SVC().fit(train_data, train_target)
print('建立的SVM模型为：\n', svc_model)
pred = svc_model.predict(test_data)
print("预测前20个结果为：\n", pred[:20])
submission = pd.DataFrame({
    "PassengerId": submit_df["PassengerId"],
    "Survived": pred.astype(int),
})
submission.to_csv('result5.csv', index=False)

#逻辑回归Logistic Regression

# from sklearn.linear_model import LogisticRegression
# LR = LogisticRegression(C=1.0,penalty="l2",tol=1e-6)
# LR.fit( train_data, train_target )
# pred = LR.predict( test_data )
# print(pred[:10])
# submission = pd.DataFrame({
# 	    "PassengerId": submit_df["PassengerId"],
# 	    "Survived": pred.astype(int)
# 	})
# submission.to_csv ( 'result10.csv', index=False )