import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pylab import *
import warnings
# NOTE(review): this silences ALL warnings globally, including pandas'
# SettingWithCopyWarning and sklearn deprecation notices that the code below
# actually triggers — consider scoping it with warnings.catch_warnings().
warnings.filterwarnings('ignore')

# =============== 1. Data loading
df = pd.read_csv('datasets_ML2/titanic.csv')
train_df, test_df = train_test_split(df)
# BUG FIX: train_test_split returns slices of `df`; copy them so the column
# assignments below modify independent frames rather than views.
train_df = train_df.copy()
test_df = test_df.copy()
# Tag every row so the combined frame can be split back after feature work.
train_df["trainortest"] = "train"
test_df["trainortest"] = "test"
all_df = pd.concat((train_df, test_df))

print("train_df:", train_df.shape)
print("test_df:", test_df.shape)

train_df.info()

test_df.info()

all_df.info()

# Which columns contain at least one missing value?
print("all_df.columns[pd.isnull(all_df).sum()>0]:\n",
      all_df.columns[pd.isnull(all_df).sum() > 0])

# BUG FIX: the original was missing the call parentheses, so it printed the
# bound method object instead of the sorted counts.
print("all_df.PassengerId.value_counts().sort_values:\n",
      all_df.PassengerId.value_counts().sort_values())

# =============== 2. Data analysis
# --------------- 2.1 Analysis by ticket class (Pclass)
print("all_df.Pclass.value_counts():\n", all_df.Pclass.value_counts())

print("train_df.Pclass.value_counts():\n", train_df.Pclass.value_counts())

# Class counts among passengers who died (Survived == 0) ...
s0 = train_df.Pclass[train_df.Survived == 0].value_counts()
print(s0)

# ... and among those who survived (Survived == 1).
s1 = train_df.Pclass[train_df.Survived == 1].value_counts()
print(s1)

# BUG FIX: `mpl` was only in scope as a side effect of
# `from matplotlib.pylab import *`; plt.rcParams is the same rcParams object
# and does not rely on pylab internals.  SimHei renders the CJK labels.
plt.rcParams['font.sans-serif'] = ['SimHei']
df = pd.DataFrame({u'获救': s1, u'未获救': s0})
df.plot(kind='bar')
plt.title(u"各船票等级的获救情况")
plt.xlabel(u"船票等级")
plt.ylabel(u"人数")
plt.show()

# --------------- 2.2 Analysis by passenger name
print(all_df.Name.head(10))

import re  # regular expressions for title extraction
import numpy as np

# Drop everything before ", " and everything from "." onward, leaving the
# honorific title, e.g. "Braund, Mr. Owen Harris" -> "Mr".
regx = re.compile('(.*, )|(\\..*)')
title = [regx.sub('', name) for name in all_df.Name.values]
all_df['title'] = title

print(all_df.title.value_counts())

# Fold alternative / French spellings onto the common English titles.
for alias, canonical in (('Mlle', 'Miss'), ('Ms', 'Miss'), ('Mme', 'Mrs')):
    all_df.loc[all_df.title == alias, 'title'] = canonical

print(all_df.title.value_counts())

# Everything outside the four frequent titles becomes the bucket 'rare'.
common = ['Mr', 'Miss', 'Mrs', 'Master']
all_df.loc[~all_df.title.isin(common), 'title'] = 'rare'
print(all_df.title.value_counts())

# --------------- 2.3 Analysis by sex
died = train_df.Sex[train_df.Survived == 0].value_counts()
survived = train_df.Sex[train_df.Survived == 1].value_counts()
# Bar chart of survival outcome per sex.
df = pd.DataFrame({u'获救': survived, u'未获救': died})
df.plot(kind='bar')
plt.title(u"性别获救情况")
plt.xlabel(u"性别")
plt.ylabel(u"人数")
plt.show()


# =============== 三、复杂类型数据处理
# --------------- 3.1大量缺失数据：年龄特征处理流程
"""
年龄这个字段缺失的比较严重，但是按照常理分析，年龄对判定是否幸存有很大的帮助
 对于缺失值处理，我们一般会用如下四种方法进行处理：
1.删除缺失值
2.使用中值，均值，众数，最值进行填充
3.使用预测数据对缺失值进行填充
4.对数据进行升维处理
 为了能够更好的运算出更好的预测效果，我们使用对缺失值进行预测的方式来进行处理
 预测数值时切忌盲目操作，先需要分析各个特征之间的关系，找到好的特征才能更好的预测数值
"""

# --------------- 3.2 Siblings/spouses and parents/children aboard
print(all_df.SibSp.value_counts())

print(all_df.Parch.value_counts())

# Family size = siblings/spouses + parents/children + the passenger.
all_df['family_cnt'] = all_df['Parch'] + all_df['SibSp'] + 1
print(all_df.family_cnt.value_counts())

# Bucket family size: 1 -> 'sigle' (typo kept: downstream one-hot column
# names such as 'family_type_sigle' depend on it), 2-3 -> 'middle', 4+ -> 'big'.
# family_cnt is always an integer >= 1, so every row gets a bucket.
all_df['family_type'] = [
    'sigle' if cnt == 1 else ('middle' if cnt <= 3 else 'big')
    for cnt in all_df.family_cnt
]
print(all_df.family_type.value_counts())

# Engineer the same family-size features on the training split.
train_df['family_cnt'] = train_df['Parch'] + train_df['SibSp'] + 1
# Same bucketing as for all_df; family_cnt is an integer >= 1.
train_df['family_type'] = [
    'sigle' if cnt == 1 else ('middle' if cnt <= 3 else 'big')
    for cnt in train_df.family_cnt
]
# Visualise survival outcome per family-size bucket.
s0 = train_df.family_type[train_df.Survived == 0].value_counts()
s1 = train_df.family_type[train_df.Survived == 1].value_counts()
df = pd.DataFrame({u'获救': s1, u'未获救': s0})
df.plot(kind='bar')
plt.title(u"家庭规模获救情况")
plt.xlabel(u"家庭规模")
plt.ylabel(u"人数")
plt.show()

# --------------- 3.3 Ticket numbers
print(all_df.Ticket.value_counts())

# --------------- 3.4 Fare distribution (kernel density estimate)
all_df.Fare.plot(kind='kde')
plt.show()

# Summary statistics of the numeric columns.
print(all_df.describe())

# --------------- 3.5 Cabin handling
# Keep only the deck letter (first character of the cabin number).  Missing
# cabins pass through str(nan) -> "nan" -> 'n', giving NaN its own category,
# which later becomes the 'cabin_type_n' dummy column.
all_df['cabin_type'] = [str(tmp_cabin)[0] for tmp_cabin in all_df.Cabin.values]
print(all_df.cabin_type.value_counts())

# Merge the rare 'G' and 'T' decks into a catch-all category 'O'.
all_df.loc[all_df.cabin_type.isin(['G', 'T']), 'cabin_type'] = 'O'
print(all_df.cabin_type.value_counts())

# Same transformation on the training split, for visualisation only.
train_df['cabin_type'] = [str(tmp_cabin)[0] for tmp_cabin in train_df.Cabin.values]
train_df.loc[train_df.cabin_type.isin(['G', 'T']), 'cabin_type'] = 'O'

s0 = train_df.cabin_type[train_df.Survived == 0].value_counts()
s1 = train_df.cabin_type[train_df.Survived == 1].value_counts()
df = pd.DataFrame({u'获救': s1, u'未获救': s0})
df.plot(kind='bar')
plt.title(u"客舱号码类型获救情况")
plt.xlabel(u"客舱号码类型")
plt.ylabel(u"人数")
plt.show()

# BUG FIX: DataFrame.info() prints its report itself and returns None, so
# `print(all_df.info())` also printed a spurious "None" line.
all_df.info()

# --------------- 3.6 Embarked: fill missing embarkation ports
print(all_df.Embarked.value_counts())

# 'S' (the most frequent port, per the counts above) fills the few NaNs.
all_df.loc[all_df.Embarked.isnull(), 'Embarked'] = 'S'
# BUG FIX: info() prints its report and returns None; wrapping it in
# print() added a spurious "None" line after the report.
all_df.info()

s0 = train_df.Embarked[train_df.Survived == 0].value_counts()
s1 = train_df.Embarked[train_df.Survived == 1].value_counts()
df = pd.DataFrame({u'获救': s1, u'未获救': s0})
df.plot(kind='bar')
plt.title(u"登录港口获救情况")
plt.xlabel(u"登录港口类型")
plt.ylabel(u"人数")
plt.show()

all_df.info()


# =============== 4. Feature engineering
# --------------- 4.1 One-hot encode the discrete features
all_df = pd.get_dummies(all_df,
                        columns=['Pclass', 'Sex', 'Embarked', 'title',
                                 'family_type', 'cabin_type'])
all_df.info()

# --------------- 4.2 Standardise the continuous features
import sklearn.preprocessing as preprocessing
scaler = preprocessing.StandardScaler()
# BUG FIX: the original called scaler.fit(...) and then passed the fitted
# scaler as fit_transform's second positional argument, where it was silently
# interpreted as (and ignored like) the unused `y` parameter — fitting each
# column twice for nothing.  fit_transform alone is sufficient; .ravel()
# flattens the (n, 1) output into a 1-D column.
for col in ('Fare', 'family_cnt', 'SibSp', 'Parch'):
    all_df[col + '_scaled'] = scaler.fit_transform(all_df[[col]]).ravel()

print(all_df.columns)

# --------------- 4.3 Features used to predict the missing ages
age_factor = [ 'Pclass_1', 'Pclass_2',
       'Pclass_3', 'Sex_female', 'Sex_male', 'Embarked_C', 'Embarked_Q',
       'Embarked_S', 'title_Master', 'title_Miss', 'title_Mr', 'title_Mrs',
       'title_rare', 'family_type_big', 'family_type_middle',
       'family_type_sigle', 'cabin_type_A', 'cabin_type_B', 'cabin_type_C',
       'cabin_type_D', 'cabin_type_E', 'cabin_type_F', 'cabin_type_O',
       'cabin_type_n', 'Fare_scaled', 'family_cnt_scaled', 'SibSp_scaled',
       'Parch_scaled']
# Rows with a missing age: the prediction targets.
age_missing_X = all_df.loc[all_df.Age.isnull(), age_factor]
# Rows with a known age: regression training features ...
age_known_X = all_df.loc[all_df.Age.notnull(), age_factor]
# ... and their ages as labels.
age_known_y = all_df.loc[all_df.Age.notnull(), 'Age']

from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
rfr.fit(age_known_X.values, age_known_y.values)

# Fill the missing ages with the forest's predictions.
age_pre = rfr.predict(age_missing_X.values)
all_df.loc[all_df.Age.isnull(), 'Age'] = age_pre

all_df['Age'].to_csv('datasets_ML2/Age_titanic')
import seaborn as sns
# NOTE(review): distplot is deprecated in recent seaborn releases.
sns.distplot(all_df['Age'])
plt.show()

all_df.info()
print(all_df.info())

# Free-text columns are no longer useful as features.
all_df.pop('Name')
all_df.pop('Ticket')
all_df.pop('Cabin')
print('删除无用信息')

# --------------- 4.4 Discretise age into bands
# BUG FIX: the original used Age.between(13, 40) for adults, leaving any
# predicted (float) age in the open interval (12, 13) with a NaN Age_type;
# the bands below cover the whole range with no gaps.
all_df.loc[all_df.Age <= 12, 'Age_type'] = 'child'
all_df.loc[(all_df.Age > 12) & (all_df.Age <= 40), 'Age_type'] = 'adult'
all_df.loc[all_df.Age > 40, 'Age_type'] = 'old'

# info() prints its own report; no print() wrapper (it would print "None").
all_df.info()

# --------------- 4.5 One-hot encode the age band
all_df = pd.get_dummies(all_df, columns=['Age_type'])

all_df.info()

# Standardise Age.  BUG FIX: as with the other columns, the original double-
# fitted the scaler and passed it as the ignored `y` argument of fit_transform.
all_df['Age_scaled'] = scaler.fit_transform(all_df[['Age']]).ravel()

all_df.info()

# --------------- 4.6 Drop raw columns superseded by engineered features
for col in ('Age', 'SibSp', 'Parch', 'Fare'):
    all_df.pop(col)
print('-')

all_df.info()

# --------------- 4.7 Split the combined frame back into train / test
# BUG FIX: boolean indexing yields views of all_df; .copy() makes pop()
# below mutate independent frames instead of triggering chained-assignment
# ambiguity on the shared data.
train_data = all_df[all_df.trainortest == 'train'].copy()

test_data = all_df[all_df.trainortest == 'test'].copy()

# The split marker column has served its purpose.
test_data.pop('trainortest')
train_data.pop('trainortest')
print('-')

test_data.info()

train_data.info()


# =============== 5. Model selection and evaluation
# --------------- 5.1 X / y split
y_train = train_data.pop('Survived')

train_data.pop('PassengerId')  # identifier, not a predictive feature
X = train_data.values
y = y_train.values

# --------------- 5.2 Logistic regression with a grid search
from sklearn import linear_model
from sklearn.model_selection import GridSearchCV
# C is the inverse regularisation strength; penalty the regularisation type.
parameters = {'penalty': ('l1', 'l2'),
                  'C': [0.01,0.1,1,10,20]
              }
# BUG FIX: the default lbfgs solver does not support the 'l1' penalty
# (raises in scikit-learn >= 0.22, so the whole grid search failed);
# liblinear supports both penalties in the grid.
estimator = linear_model.LogisticRegression(solver='liblinear')
gsearch = GridSearchCV(estimator, param_grid=parameters, scoring='precision', cv=10)
gsearch.fit(X=X, y=y)
print(gsearch.best_params_)
print(gsearch.best_score_)

# NOTE(review): the final model hard-codes C=0.01 / l2 rather than reusing
# gsearch.best_params_ — kept as-is to preserve the original behaviour.
clf_lr = linear_model.LogisticRegression(C=0.01, penalty='l2', tol=1e-6)
clf_lr.fit(X=X, y=y)
from sklearn.model_selection import learning_curve


def plot_learning_curve(estimator, title, X, y, ylim=None, cv=10, n_jobs=1,
                        train_sizes=np.linspace(.05, 1., 20), verbose=0, plot=True):
    """Plot the learning curve of *estimator* on the data (X, y).

    Parameters
    ----------
    estimator : the classifier (or regressor) to evaluate.
    title : figure title.
    X : feature matrix (numpy array).
    y : target vector.
    ylim : optional (ymin, ymax) tuple bounding the plot's y axis.
    cv : number of cross-validation folds.
    n_jobs : number of parallel jobs (default 1).
    train_sizes : fractions of the data used for each curve point.
    verbose : verbosity passed through to sklearn's learning_curve.
    plot : when False, skip drawing and only return the summary numbers.

    Returns
    -------
    (midpoint, diff) : midpoint between the final train/CV score bands and
    the gap between them — a rough train/validation divergence indicator.
    """
    sizes, tr_scores, cv_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs,
        train_sizes=train_sizes, verbose=verbose)

    tr_mean = np.mean(tr_scores, axis=1)
    tr_std = np.std(tr_scores, axis=1)
    cv_mean = np.mean(cv_scores, axis=1)
    cv_std = np.std(cv_scores, axis=1)

    if plot:
        plt.figure()
        plt.title(title)
        if ylim is not None:
            plt.ylim(*ylim)
        plt.xlabel(u"训练样本数")
        plt.ylabel(u"得分")
        # NOTE: the y axis is inverted here and inverted again after the
        # curves are drawn, so the final orientation is the normal one.
        plt.gca().invert_yaxis()
        plt.grid()

        # Shaded ±1 std bands around each mean curve.
        plt.fill_between(sizes, tr_mean - tr_std, tr_mean + tr_std,
                         alpha=0.1, color="b")
        plt.fill_between(sizes, cv_mean - cv_std, cv_mean + cv_std,
                         alpha=0.1, color="r")
        plt.plot(sizes, tr_mean, 'o-', color="b", label=u"训练集上得分")
        plt.plot(sizes, cv_mean, 'o-', color="r", label=u"交叉验证集上得分")

        plt.legend(loc="best")

        plt.draw()
        plt.gca().invert_yaxis()
        plt.show()

    midpoint = ((tr_mean[-1] + tr_std[-1]) + (cv_mean[-1] - cv_std[-1])) / 2
    diff = (tr_mean[-1] + tr_std[-1]) - (cv_mean[-1] - cv_std[-1])
    return midpoint, diff

plot_learning_curve(estimator=clf_lr, title="lr_learning_curve", X=X, y=y)

# Score the held-out split and write a submission-style CSV.
PassengerId = test_data.pop('PassengerId')
y_test = test_data.pop('Survived')
X_test = test_data.values
test_rs = clf_lr.predict(X_test)

lr_rs = pd.DataFrame({'PassengerId': PassengerId, 'Survived': test_rs})

lr_rs.to_csv("datasets_ML2/lr_rs.csv", index=False)

# BUG FIX: coef_ has shape (1, n_features); list(coef_.T) filled the frame
# with one-element arrays, which sort_values cannot order sensibly —
# .ravel() yields plain scalars.
lr_feature_score = pd.DataFrame({"columns": list(train_data.columns),
                                 "coef": clf_lr.coef_.ravel()})

# BUG FIX: the sorted feature-weight table was computed and then discarded;
# print it so it is actually visible.
print(lr_feature_score.sort_values(by='coef', ascending=False))

# --------------- 5.5 Classification with an SVC
from sklearn.svm import SVC

# Grid over the regularisation strength and kernel family.
parameters = {
    "C": [0.1, 1, 2, 5, 10],
    "kernel": ['linear', 'poly', 'rbf'],
}
estimator = SVC()
gsearch = GridSearchCV(estimator, param_grid=parameters, scoring='roc_auc', cv=10)
gsearch.fit(X=X, y=y)
print(gsearch.best_params_)
print(gsearch.best_score_)

# probability=True enables predict_proba (needed for the ROC curve later).
clf_svc = SVC(C=2, kernel="poly", probability=True)
plot_learning_curve(estimator=clf_svc, title="svc_learning_curve", X=X, y=y)
clf_svc.fit(X=X, y=y)
# NOTE(review): coef0 is the polynomial kernel's independent-term
# *hyperparameter* (constructor argument), not a learned coefficient.
print("clf_svc", clf_svc.coef0)

from sklearn.metrics import recall_score,classification_report,confusion_matrix,precision_score,roc_auc_score,roc_curve

def pingguzhibiao(model, X_test, y_test, title):
    """Print classification metrics for *model* and draw its ROC curve.

    Parameters
    ----------
    model : fitted classifier exposing predict() and predict_proba().
    X_test : test feature matrix.
    y_test : true binary labels for X_test.
    title : label used in the printed report lines and the plot title.
    """
    pric_y_test = model.predict(X_test)
    print('分类报告{}'.format(title), classification_report(y_test, pric_y_test))
    print('混淆矩阵{}'.format(title), confusion_matrix(y_test, pric_y_test))
    print('查准率{}'.format(title), precision_score(y_test, pric_y_test))
    print('查全率{}'.format(title), recall_score(y_test, pric_y_test))
    # ROC curve: rank test rows by the predicted probability of class 1.
    pro_ba = model.predict_proba(X_test)
    pro_ba = pro_ba[:, 1]
    fpr, tpr, th = roc_curve(y_test, pro_ba)
    auc = roc_auc_score(y_test, pro_ba)
    print('AUC数值', auc)
    plt.title(label=title)
    plt.plot(fpr, tpr)
    # BUG FIX: the original passed a 2x2 array ([[0,0],[1,1]]), which plots
    # each COLUMN against the index and so drew the chance diagonal twice by
    # accident; draw the (0,0)-(1,1) reference line once, explicitly.
    plt.plot([0, 1], [0, 1], 'r--')
    plt.show()

pingguzhibiao(clf_lr, X_test, y_test, '逻辑回归')
pingguzhibiao(clf_svc, X_test, y_test, 'SVM')