import pandas as pd


# Load the Titanic training data and print a structural summary.
train_data = pd.read_csv('data/train.csv')
print(train_data.info())

# Target vector: 1 = survived, 0 = did not survive.
y = train_data['Survived']

# Encode sex as a numeric feature (male -> 1, female -> 0).
train_data['Sex'] = train_data['Sex'].map({'male': 1, 'female': 0})

# Drop the target and the columns not used as features,
# leaving Pclass, Sex, Age, SibSp and Parch.
unused_columns = ['Survived', 'PassengerId', 'Name', 'Ticket', 'Fare', 'Cabin', 'Embarked']
train_data = train_data.drop(unused_columns, axis=1)

# Impute missing ages with the column mean so every row is usable.
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
X = train_data

from sklearn.model_selection import train_test_split

# Hold out 25% of the data for evaluation; the fixed seed makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)

# Baseline model: a single decision tree.
from sklearn.tree import DecisionTreeClassifier
# Fix: seed the tree so its score is reproducible. Without random_state the
# feature/threshold tie-breaking is randomized, so repeated runs print
# different accuracies — inconsistent with the seeded split above and the
# seeded RandomForestClassifier below.
dtc = DecisionTreeClassifier(random_state=42)
dtc.fit(X_train, y_train)
dtc_y_predict = dtc.predict(X_test)
print(dtc.score(X_test, y_test))

# Random forest: an ensemble of depth-limited trees, seeded for reproducibility.
from sklearn.ensemble import RandomForestClassifier

forest = RandomForestClassifier(max_depth=6, random_state=9)
forest.fit(X_train, y_train)
forest_predictions = forest.predict(X_test)
print(forest.score(X_test, y_test))

# Per-class precision / recall / F1 for both classifiers.
from sklearn.metrics import classification_report

print(classification_report(y_test, dtc_y_predict))
print(classification_report(y_test, forest_predictions))

# Hyperparameter tuning via cross-validated grid search.
from sklearn.model_selection import GridSearchCV, StratifiedKFold

# Fresh, un-tuned forest; the seed keeps the search reproducible.
rf = RandomForestClassifier(random_state=9)

# Parameter grid to search over.
params = {
    'n_estimators': [50, 100, 150],    # number of trees in the ensemble
    'max_depth': [None, 6, 9, 12],     # None = grow trees without a depth limit
    'min_samples_split': [2, 5, 10],
    'max_features': ['sqrt', 'log2'],  # features considered at each split
    'criterion': ['gini', 'entropy']   # split-quality measure
}

# Stratified K-fold keeps each fold's class ratio close to the full set's.
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=9)

# Exhaustive grid search, parallelized across all CPU cores.
estimator = GridSearchCV(
    estimator=rf,
    param_grid=params,
    cv=cv,
    n_jobs=-1,  # use every available CPU core
    verbose=2,  # show per-fit progress
    scoring='accuracy'  # could also be 'f1', 'roc_auc', etc.
)

# Run the search. With the default refit=True, GridSearchCV refits the
# best configuration on the full training set afterwards.
estimator.fit(X_train, y_train)

# Report the winning configuration and its cross-validation score.
print("最佳参数组合:", estimator.best_params_)
print("最佳交叉验证分数:", estimator.best_score_)
# Fix: the tuned model was previously never evaluated on the held-out test
# set, so it could not be compared with the two baseline scores printed
# above. estimator.score() delegates to the refit best estimator.
print("Test-set accuracy of tuned model:", estimator.score(X_test, y_test))