#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Titanic 生存预测 – 决策树（完整脚本）
依赖：pandas, scikit-learn, matplotlib, seaborn
运行：python titanic_dt.py
"""

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.tree import DecisionTreeClassifier, plot_tree
import matplotlib.pyplot as plt
import seaborn as sns

# 1. Load data
df = pd.read_csv('data.csv')

# 2. Basic cleaning: impute missing values.
# (read_csv already returns a fresh DataFrame, so no defensive copy is
# needed; the original duplicated `df = df.copy()` lines were redundant.)
# NOTE(review): the medians are computed on the FULL dataset before the
# train/test split, which leaks test information into the imputation —
# consider moving imputation into the pipeline (e.g. SimpleImputer).
df = df.fillna({'Age': df['Age'].median(),
                'Embarked': 'S',              # most frequent embarkation port
                'Fare': df['Fare'].median()})

# 3. Features and target
target = 'Survived'
num_features = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
cat_features = ['Sex', 'Embarked']
X = df[num_features + cat_features]
y = df[target]

# 4. Preprocessing: numeric columns pass through unchanged; categorical
# columns are one-hot encoded with the first level dropped (avoids the
# redundant dummy column).
preprocess = ColumnTransformer(transformers=[
    ('num', 'passthrough', num_features),
    ('cat', OneHotEncoder(drop='first'), cat_features),
])

# 5. Train / test split, stratified so both sets keep the class balance
X_train, X_test, y_train, y_test = train_test_split(
    X, y,
    test_size=0.2,
    stratify=y,
    random_state=42,
)

# 6. Model + hyper-parameter grid search (comment out the grid to speed up)
pipe = Pipeline(steps=[
    ('prep', preprocess),
    ('dt', DecisionTreeClassifier(random_state=42)),
])

# Grid over the usual tree-complexity knobs.
param_grid = {
    'dt__max_depth': [3, 5, 7, 9, None],
    'dt__min_samples_split': [2, 5, 10],
    'dt__min_samples_leaf': [1, 2, 4],
}
# n_jobs=-1 fits the CV folds in parallel on all cores; results are
# identical to the serial run.
grid = GridSearchCV(pipe, param_grid, cv=5, scoring='accuracy', n_jobs=-1)
grid.fit(X_train, y_train)

print('最佳参数:', grid.best_params_)
print('交叉验证最佳准确率: {:.3f}'.format(grid.best_score_))

# Best pipeline (preprocessing + tree) refit on the whole training set.
best_model = grid.best_estimator_

# 7. Evaluate on the held-out test set
y_pred = best_model.predict(X_test)
test_acc = accuracy_score(y_test, y_pred)
print('\n测试集准确率: {:.3f}'.format(test_acc))
print('\n分类报告:\n', classification_report(y_test, y_pred))

# 8. Confusion-matrix heatmap, saved to confusion_matrix.png
class_labels = ['Not Survive', 'Survive']
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(5, 4))
sns.heatmap(cm,
            annot=True, fmt='d', cmap='Blues',
            xticklabels=class_labels,
            yticklabels=class_labels)
plt.title('Confusion Matrix – Decision Tree')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.tight_layout()
plt.savefig('confusion_matrix.png', dpi=120)
plt.show()

# 9. Visualize the decision tree (limit the drawn depth; a full tree is
# unreadable).
# Pull the fitted tree out of the best pipeline.
tree_model = best_model.named_steps['dt']
# Build post-One-Hot feature names: numeric columns come first (they pass
# through unchanged), followed by the expanded categorical dummies.
encoder = best_model.named_steps['prep'].named_transformers_['cat']
feature_names = num_features + list(encoder.get_feature_names_out(cat_features))

plt.figure(figsize=(20, 10))
plot_tree(tree_model,
          feature_names=feature_names,
          class_names=['Not Survive', 'Survive'],
          filled=True,
          rounded=True,
          max_depth=3)  # draw only the top 3 levels
plt.title('Decision Tree (max_depth=3 for display)')
plt.savefig('decision_tree.png', dpi=150)
plt.show()