import numpy as np
import pandas as pd
from sklearn import preprocessing
import matplotlib.pyplot as plt
# seaborn: richer statistical plotting on top of matplotlib
import seaborn as sns
import warnings
from sklearn.ensemble import RandomForestRegressor
import re

def data_preprocess():
    """Load the Titanic train/test CSVs and fill in missing values.

    Missing-value strategy:
    - Embarked: filled with the most frequent embarkation port.
    - Cabin: filled with the placeholder 'U0'.
    - Age: predicted by a random-forest regressor trained on the rows
      whose age is known, using Survived/Fare/Parch/SibSp/Pclass.

    Returns:
        (train_data, test_data): training DataFrame with missing values
        handled, and the raw test DataFrame.
    """
    warnings.filterwarnings('ignore')
    pd.set_option('display.max_columns', 1000)
    pd.set_option('display.width', 1000)
    pd.set_option('display.max_colwidth', 1000)

    train_data = pd.read_csv('train.csv')
    test_data = pd.read_csv('test.csv')
    sns.set_style('whitegrid')
    # Fill missing Embarked with the mode of the non-null values.
    # fillna avoids the chained assignment on a boolean-indexed slice
    # the original used, which raises SettingWithCopyWarning and can
    # silently fail to write back.
    train_data['Embarked'] = train_data['Embarked'].fillna(
        train_data['Embarked'].dropna().mode()[0])
    # Placeholder value for unknown cabins.
    train_data['Cabin'] = train_data['Cabin'].fillna('U0')
    # Use a handful of correlated numeric features to predict missing ages.
    age_df = train_data[['Age', 'Survived', 'Fare', 'Parch', 'SibSp', 'Pclass']]
    age_df_notnull = age_df.loc[train_data['Age'].notnull()]
    age_df_isnull = age_df.loc[train_data['Age'].isnull()]
    X = age_df_notnull.values[:, 1:]
    Y = age_df_notnull.values[:, 0]
    # n_estimators: number of trees; n_jobs=-1: use all processors.
    # random_state makes the imputed ages reproducible across runs.
    RFR = RandomForestRegressor(n_estimators=1000, n_jobs=-1, random_state=42)
    RFR.fit(X, Y)
    predictAge = RFR.predict(age_df_isnull.values[:, 1:])
    train_data.loc[train_data['Age'].isnull(), 'Age'] = predictAge
    return train_data, test_data

def data_analyze(train_data, test_date):
    """Exploratory visualisation of survival vs. the main features.

    Shows a sequence of matplotlib/seaborn figures (blocking on each
    plt.show()) and, as a side effect, adds derived columns to
    train_data: Age_int, Age_group, Title, Name_length, Family_Size.

    Args:
        train_data: training DataFrame (Age already imputed).
        test_date: unused here; kept for a uniform pipeline signature.
    """
    # Tabular summaries of sex / class vs. survival, kept for reference:
    # print(train_data.groupby(['Sex', 'Survived'])['Survived'].count())
    # print(train_data[['Sex', 'Survived']].groupby(['Sex']).mean())
    # print(train_data.groupby(['Pclass', 'Survived'])['Pclass'].count())
    # print(train_data[['Pclass', 'Survived']].groupby(['Pclass']).mean())
    # print(train_data[['Sex','Pclass','Survived']].groupby(['Pclass','Sex']).mean())
    # print(train_data.groupby(['Sex', 'Pclass', 'Survived'])['Survived'].count())
    fig, ax = plt.subplots(1, 2, figsize=(18, 5))
    ax[0].set_yticks(range(0, 110, 10))
    # seaborn >= 0.13 requires x/y as keyword arguments (positional
    # data arguments were removed).
    sns.violinplot(x='Pclass', y='Age', hue='Survived', data=train_data,
                   split=True, ax=ax[0])
    ax[0].set_title('Pclass and Age vs Survived')

    ax[1].set_yticks(range(0, 110, 10))
    sns.violinplot(x='Sex', y='Age', hue='Survived', data=train_data,
                   split=True, ax=ax[1])
    ax[1].set_title('Sex and Age vs Survived')
    plt.show()
    # Age histogram (100 bins) next to a boxplot (min/quartiles/median/max).
    plt.figure(figsize=(15, 5))
    plt.subplot(121)
    train_data['Age'].hist(bins=100)
    plt.xlabel('Age')
    plt.ylabel('Num')
    plt.subplot(122)
    train_data.boxplot(column='Age', showfliers=False)
    plt.show()
    # Age density split by survival.
    facet = sns.FacetGrid(train_data, hue="Survived", aspect=4)
    # 'shade=' was removed in seaborn 0.14; 'fill=' is the replacement.
    facet.map(sns.kdeplot, 'Age', fill=True)
    facet.set(xlim=(0, train_data['Age'].max()))
    facet.add_legend()
    plt.show()
    # Average survival rate per integer age.
    fig, axis1 = plt.subplots(1, 1, figsize=(18, 4))
    train_data['Age_int'] = train_data['Age'].astype(int)
    average_age = train_data[["Age_int", "Survived"]].groupby(['Age_int'], as_index=False).mean()
    sns.barplot(x='Age_int', y='Survived', data=average_age)
    plt.show()
    # Survival by coarse age group (child / teen / adult / elderly).
    bins = [0, 12, 18, 65, 100]
    train_data['Age_group'] = pd.cut(train_data['Age'], bins)
    by_age = train_data.groupby('Age_group')['Survived'].mean()
    by_age.plot(kind='bar')
    plt.show()
    # Extract the title (Mr/Mrs/Miss/...) from the name with a regex.
    # Raw string avoids the invalid-escape warning for '\.'.
    # expand=False returns a Series for a single capture group.
    train_data['Title'] = train_data['Name'].str.extract(r'([A-Za-z]+)\.', expand=False)
    # Cross-tabulate title against sex (sanity check of the extraction).
    pd.crosstab(train_data['Title'], train_data['Sex'])
    # Title vs. survival rate.
    train_data[['Title', 'Survived']].groupby(['Title']).mean().plot.bar()
    plt.show()
    # Possible relationship between name length and survival rate.
    fig, axis1 = plt.subplots(1, 1, figsize=(18, 4))
    train_data['Name_length'] = train_data['Name'].apply(len)
    name_length = train_data[['Name_length', 'Survived']].groupby(['Name_length'], as_index=False).mean()
    sns.barplot(x='Name_length', y='Survived', data=name_length)
    plt.show()

    # Split into passengers with / without siblings or a spouse aboard.
    sibsp_df = train_data[train_data['SibSp'] != 0]
    no_sibsp_df = train_data[train_data['SibSp'] == 0]
    plt.figure(figsize=(11, 5))
    plt.subplot(121)
    sibsp_df['Survived'].value_counts().plot.pie(labels=['No Survived', 'Survived'], autopct='%1.1f%%')
    plt.xlabel('sibsp')
    plt.subplot(122)
    no_sibsp_df['Survived'].value_counts().plot.pie(labels=['No Survived', 'Survived'], autopct='%1.1f%%')
    plt.xlabel('no_sibsp')
    plt.show()
    # Split into passengers with / without parents or children aboard.
    parch_df = train_data[train_data['Parch'] != 0]
    no_parch_df = train_data[train_data['Parch'] == 0]
    plt.figure(figsize=(11, 5))
    plt.subplot(121)
    parch_df['Survived'].value_counts().plot.pie(labels=['No Survived', 'Survived'], autopct='%1.2f%%')
    plt.xlabel('parch')
    plt.subplot(122)
    no_parch_df['Survived'].value_counts().plot.pie(labels=['No Survived', 'Survived'], autopct='%1.2f%%')
    plt.xlabel('no_parch')
    plt.show()
    # Effect of having any relatives aboard.
    fig, ax = plt.subplots(1, 2, figsize=(15, 5))
    train_data[['Parch', 'Survived']].groupby(['Parch']).mean().plot.bar(ax=ax[0])
    ax[0].set_title('Parch and Survived')
    train_data[['SibSp', 'Survived']].groupby(['SibSp']).mean().plot.bar(ax=ax[1])
    ax[1].set_title('SibSp and Survived')
    plt.show()
    train_data['Family_Size'] = train_data['Parch'] + train_data['SibSp'] + 1
    train_data[['Family_Size', 'Survived']].groupby(['Family_Size']).mean().plot.bar()
    plt.show()
    # Travelling alone lowers survival, but very large families fare poorly too.


def data_convert(train_data, test_date):
    """Convert categorical/continuous features to model-ready numeric columns.

    Transformations applied to train_data:
    - Embarked: one-hot encoded into S/C/Q columns, original dropped.
    - Cabin: missing values replaced with 'U0'; alphabetic prefix
      extracted as CabinLetter and factorized to integers.
    - Age: standard-scaled into Age_scaled (zero mean, unit variance).
    - Fare: quantile-binned into 5 buckets (Fare_bin), factorized
      (Fare_bin_id) and one-hot encoded (Fare_<interval> columns).

    Args:
        train_data: training DataFrame (Age and Fare must be non-null).
        test_date: unused here; kept for a uniform pipeline signature.

    Returns:
        The transformed DataFrame. NOTE: join/concat rebind the local
        name, so callers MUST use the return value — the original
        version returned nothing and its work was silently discarded.
    """
    # One-hot ("dummy") encoding of the embarkation port.
    embark_dummies = pd.get_dummies(train_data['Embarked'])
    train_data = train_data.join(embark_dummies)
    train_data.drop(['Embarked'], axis=1, inplace=True)
    embark_dummies = train_data[['S', 'C', 'Q']]
    # Replace missing cabins with "U0" via .loc — the original chained
    # assignment raises SettingWithCopyWarning and may not write back.
    train_data.loc[train_data['Cabin'].isnull(), 'Cabin'] = 'U0'
    # Alphabetic part of the cabin number (e.g. 'C85' -> 'C')...
    train_data['CabinLetter'] = train_data['Cabin'].map(
        lambda x: re.compile("([a-zA-Z]+)").search(x).group())
    # ...converted to incremental integer codes.
    train_data['CabinLetter'] = pd.factorize(train_data['CabinLetter'])[0]
    # Scaling maps features with very different ranges (Age: 0-100,
    # income: 0-10M) onto a comparable scale so no feature dominates
    # in scale-sensitive models. (x - mean) / population std (ddof=0)
    # is numerically identical to sklearn's StandardScaler, without
    # the original's hard-coded 891-row assert.
    age = train_data['Age'].values.astype(float)
    train_data['Age_scaled'] = (age - age.mean()) / age.std()
    # Binning discretises a continuous feature into buckets, like the
    # bins of a histogram; qcut makes the 5 buckets equal-frequency.
    train_data['Fare_bin'] = pd.qcut(train_data['Fare'], 5)
    # factorize: integer id per bin.
    train_data['Fare_bin_id'] = pd.factorize(train_data['Fare_bin'])[0]

    # One dummy column per fare bin.
    fare_bin_dummies_df = pd.get_dummies(train_data['Fare_bin']).rename(columns=lambda x: 'Fare_' + str(x))
    train_data = pd.concat([train_data, fare_bin_dummies_df], axis=1)
    return train_data


def data_feature():
    """Load train/test and stack them for joint feature engineering.

    Test rows get a placeholder Survived=0 so both frames share the
    same columns before concatenation.

    Returns:
        (combined_train_test, PassengerId): the concatenated DataFrame
        and the test-set passenger ids (the original computed these and
        discarded them; returning them keeps the work usable).
    """
    train_df_org = pd.read_csv('train.csv')
    test_df_org = pd.read_csv('test.csv')
    test_df_org['Survived'] = 0
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent (same default index-preserving behavior).
    combined_train_test = pd.concat([train_df_org, test_df_org])
    PassengerId = test_df_org['PassengerId']
    return combined_train_test, PassengerId


if __name__ == '__main__':
    # Run the Titanic pipeline end to end:
    # preprocess -> exploratory analysis -> feature conversion -> features.
    cleaned_train, raw_test = data_preprocess()
    data_analyze(cleaned_train, raw_test)
    data_convert(cleaned_train, raw_test)
    data_feature()
