import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction import DictVectorizer
from sklearn.tree import export_graphviz

from sklearn.ensemble import RandomForestClassifier


def random_forest_titanic():
    """
    Predict Titanic passenger survival with a random forest tuned by
    grid search + 3-fold cross-validation.

    Loads the dataset over HTTP, imputes missing ages, one-hot encodes
    the features via DictVectorizer, fits a GridSearchCV-wrapped
    RandomForestClassifier, and prints predictions, accuracy and the
    grid-search results.
    :return: None
    """
    # 1. Load the data
    # NOTE(review): this Vanderbilt URL has been unreliable historically —
    # TODO confirm it still serves the CSV, or switch to a mirrored copy.
    titan = pd.read_csv("http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt")

    # 2. Select features / target. .copy() makes x an independent frame so the
    # imputation below is a real write, not chained assignment on a view
    # (the original used inplace fillna on a slice, which triggers
    # SettingWithCopyWarning and is a no-op under pandas copy-on-write).
    x = titan[['pclass', 'age', 'sex']].copy()
    y = titan['survived']

    # 2.1 Impute missing ages with the column mean
    x['age'] = x['age'].fillna(x['age'].mean())

    # 2.2 Convert the feature frame to a list of row dicts for DictVectorizer
    x = x.to_dict(orient="records")

    # 3. Split into train and test sets (fixed seed for reproducibility)
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=22)

    # 4. Dict-based one-hot feature extraction: fit the vocabulary on the
    # training set only, then reuse it to transform the test set.
    transfer = DictVectorizer()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 5. Random forest wrapped in a grid search over n_estimators / max_depth
    # (the grid's max_depth values override the constructor's max_depth=8).
    estimator = RandomForestClassifier(criterion="entropy", max_depth=8)
    param_dict = {"n_estimators": [120, 200, 300, 500, 800, 1200],
                  "max_depth": [5, 8, 15, 25, 30]}
    estimator = GridSearchCV(estimator, param_grid=param_dict, cv=3)
    estimator.fit(x_train, y_train)

    # 6. Evaluate
    # Method 1: compare predictions against the ground truth directly
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("y_test:\n", y_test)
    print("预测值与真实值比对：\n", y_predict == y_test)
    # Method 2: accuracy on the held-out test set
    score = estimator.score(x_test, y_test)
    print("准确率为：\n", score)
    # Best hyper-parameters found by the grid search
    print("最佳参数：\n", estimator.best_params_)
    # Best mean cross-validated score
    print("最佳结果：\n", estimator.best_score_)
    # The refitted best estimator
    print("最佳估计器：\n", estimator.best_estimator_)
    # Full cross-validation results table
    print("交叉验证结果：\n", estimator.cv_results_)

    return None

# Script entry point: run the training/evaluation pipeline only when this
# file is executed directly, not when it is imported as a module.
if __name__ == "__main__":
    random_forest_titanic()