#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : 随机森林-泰坦尼克号预测.py
# @Author: dongguangwen
# @Date  : 2025-02-08 14:40
# 0.导入工具包
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report


# 1. Load the raw Titanic training data.
data = pd.read_csv('./data/titanic/train.csv')

# 2. Feature selection and preprocessing.
# Keep three predictive columns; copy so the original frame is never mutated.
x = data[['Pclass', 'Sex', 'Age']].copy()
y = data['Survived'].copy()

# Impute missing ages with the column mean, then one-hot encode the
# categorical column (Sex) so the tree-based models can consume it.
mean_age = x['Age'].mean()
x['Age'] = x['Age'].fillna(mean_age)
x = pd.get_dummies(x)

# Hold out 20% of the rows for evaluation; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=22
)

# 3. Model training.

# 3.1 Plain decision tree as a baseline.
model1 = DecisionTreeClassifier()
model1.fit(x_train, y_train)

# 3.2 Random forest with default hyper-parameters.
model2 = RandomForestClassifier()
model2.fit(x_train, y_train)

# 3.3 Grid search with 5-fold cross-validation over the forest's
#     hyper-parameters. GridSearchCV clones its estimator, so the
#     already-fitted model2 is left untouched.
params = dict(n_estimators=[10, 20], max_depth=[2, 3, 4, 5])
model3 = GridSearchCV(estimator=model2, param_grid=params, cv=5)
model3.fit(x_train, y_train)
print(model3.best_estimator_)

# Retrain a forest with the hyper-parameters the search reported
# (hard-coded here from a previous run's best_estimator_ output).
model4 = RandomForestClassifier(max_depth=4, n_estimators=10)
model4.fit(x_train, y_train)

# 4. Predict and score each model on the held-out set.
# Predictions are kept for the per-class reports that follow; accuracy is
# printed in the same order as before: decision tree, forest, tuned forest.
model1_pred = model1.predict(x_test)
model2_pred = model2.predict(x_test)
model4_pred = model4.predict(x_test)

for clf in (model1, model2, model4):
    print(clf.score(x_test, y_test))

# 5. Detailed per-class metrics.
# classification_report expects (y_true, y_pred) in that order; the original
# call passed them swapped, which transposes the precision/recall columns and
# makes the "support" column count *predicted* classes instead of true ones.
print(classification_report(y_test, model1_pred))
print(classification_report(y_test, model2_pred))
print(classification_report(y_test, model4_pred))

"""
RandomForestClassifier(max_depth=4, n_estimators=10)
0.7821229050279329
0.776536312849162
0.7653631284916201
              precision    recall  f1-score   support

           0       0.86      0.80      0.83       119
           1       0.65      0.75      0.70        60

    accuracy                           0.78       179
   macro avg       0.76      0.77      0.76       179
weighted avg       0.79      0.78      0.79       179

              precision    recall  f1-score   support

           0       0.83      0.81      0.82       112
           1       0.70      0.72      0.71        67

    accuracy                           0.78       179
   macro avg       0.76      0.76      0.76       179
weighted avg       0.78      0.78      0.78       179

              precision    recall  f1-score   support

           0       0.85      0.79      0.82       118
           1       0.64      0.72      0.68        61

    accuracy                           0.77       179
   macro avg       0.74      0.75      0.75       179
weighted avg       0.77      0.77      0.77       179
"""
