from random import randint, uniform

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split, RandomizedSearchCV, learning_curve
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, accuracy_score
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree
from sklearn.preprocessing import StandardScaler

plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei so Chinese (CJK) labels render correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly when a CJK font is active

"""
演示决策树分层
"""
df = pd.read_csv('train.csv')
#Age列空值填充： Age列平均值

df['Age'] = df['Age'].fillna(df['Age'].mean())
# 1、读取数据: train.csv

#2、数据预处理
# 特征列:  船舱等级: Pclass 、 性别:Sex 、年龄:Age
#标签列: Survived
x = df[['Pclass', 'Sex', 'Age']]
y = df['Survived']
#分析 age需要处理 采用热编码
x = pd.get_dummies(x)
x.drop(columns=['Sex_female'], inplace=True)
x.rename(columns={'Sex_male': 'Sex'}, inplace=True)
#拆分训练集和测试集
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=20)
#3、特征工程: 标准化
scaler = StandardScaler()
xtrain_scaled = scaler.fit_transform(x_train)
xtest_scaled = scaler.transform(x_test)
# 4. Model training: candidate estimators and their randomized-search spaces.
models = {
    'DecisionTree': {
        'model': DecisionTreeClassifier(random_state=20),
        'params': {
            'max_depth': list(range(3, 20)),
            'min_samples_split': list(range(3, 20)),
            'min_samples_leaf': list(range(3, 20)),
            'criterion': ['gini', 'entropy'],
            'max_features': [None]
        }
    },
    'RandomForest': {
        'model': RandomForestClassifier(random_state=20),
        'params': {
            'n_estimators': list(range(3, 50)),
            'max_depth': list(range(3, 20)),
            'min_samples_split': list(range(3, 20)),
            'min_samples_leaf': list(range(3, 20)),
            'max_features': [None],
            'bootstrap': [True, False]
        }
    },
    'GradientBoosting': {
        'model': GradientBoostingClassifier(random_state=20),
        'params': {
            'n_estimators': list(range(3, 20)),
            # BUG FIX: learning_rate must be a small positive float (typically
            # <= 1.0). The previous search space range(3, 20) sampled integer
            # rates 3-19, which make gradient boosting diverge.
            'learning_rate': [0.01, 0.05, 0.1, 0.2, 0.3],
            'max_depth': list(range(3, 20)),
            'min_samples_split': list(range(3, 20)),
            'min_samples_leaf': list(range(3, 20)),
            'subsample': [0.6, 0.7, 0.8, 0.9, 1.0]
        }
    }
}

# Run a randomized hyper-parameter search for every candidate model and
# record the best estimator and best cross-validation score per model.
best_models = {}
best_scores = {}
for name, config in models.items():
    print(f"\n正在优化 {name}...")
    search = RandomizedSearchCV(
        config['model'],
        config['params'],
        n_iter=50,          # number of random parameter combinations to try
        cv=5,               # 5-fold cross-validation
        scoring='accuracy',
        random_state=20,    # reproducible sampling of the search space
        n_jobs=-1,          # use all CPU cores
        verbose=0
    )
    search.fit(xtrain_scaled, y_train)
    best_models[name] = search.best_estimator_
    best_scores[name] = search.best_score_
    # BUG FIX: the same best_score_ was previously printed twice under two
    # different labels ("最佳分数" and "最佳交叉验证分数"); keep one clear line.
    print(f"{name} 最佳交叉验证分数: {search.best_score_:.4f}")
# 5. Model evaluation.
# Select the model with the highest cross-validation score.
best_model_name = max(best_scores.items(), key=lambda kv: kv[1])[0]
best_model = best_models[best_model_name]

print(f"\n=== 最终选择的最佳模型: {best_model_name} ===")
y_pred = best_model.predict(xtest_scaled)

# Accuracy on the held-out test set.
accuracy = accuracy_score(y_test, y_pred)
print(f"测试集准确率: {accuracy:.4f}")

# Per-class precision / recall / F1 report.
print("\n分类报告:")
print(classification_report(y_test, y_pred, target_names=['Died', 'Survived']))

# Compare every tuned model on the same test set.
print("\n=== 所有模型测试集表现对比 ===")
for name in best_models:
    y_pred_temp = best_models[name].predict(xtest_scaled)
    test_accuracy = accuracy_score(y_test, y_pred_temp)
    print(f"{name}: {test_accuracy:.4f} (交叉验证: {best_scores[name]:.4f})")

def plot_learning_curve_diagnosis(model, x, y, title="学习曲线诊断"):
    """Plot a learning curve and print an overfitting/underfitting diagnosis.

    Args:
        model: an sklearn-compatible classifier (cloned internally by
            ``learning_curve``; it does not need to be fitted).
        x: feature matrix.
        y: label vector.
        title: figure title.
    """
    # Collect train/validation accuracy at 10 increasing training-set sizes.
    # BUG FIX: the old code fell back to 'r2' (a regression metric) whenever
    # y had more than two classes; that is invalid for the classifiers this
    # script diagnoses, so accuracy is used unconditionally.
    train_sizes, train_scores, test_scores = learning_curve(
        model, x, y, cv=5,
        train_sizes=np.linspace(0.1, 1.0, 10),
        scoring='accuracy',
        random_state=20
    )

    # Mean and spread of the scores across the 5 CV folds.
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)

    # Plot both curves.
    plt.figure(figsize=(12, 8))
    plt.plot(train_sizes, train_mean, 'o-', color='blue', linewidth=2, label='训练集')
    plt.plot(train_sizes, test_mean, 'o-', color='red', linewidth=2, label='测试集')

    # +/- one standard deviation confidence bands.
    plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, alpha=0.2, color='blue')
    plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, alpha=0.2, color='red')

    plt.xlabel('训练样本数量')
    plt.ylabel('得分')
    plt.title(title)
    plt.legend()
    plt.grid(True, alpha=0.3)
    # BUG FIX: the figure was built but never displayed, so in script mode
    # it was silently discarded.
    plt.show()

    # Diagnose from the final train/validation gap and absolute score.
    final_gap = train_mean[-1] - test_mean[-1]
    final_test_score = test_mean[-1]

    print("\n=== 学习曲线诊断结果 ===")
    print(f"最终训练集得分: {train_mean[-1]:.4f}")
    print(f"最终测试集得分: {test_mean[-1]:.4f}")
    print(f"性能差距: {final_gap:.4f}")

    # Thresholds below are heuristics: a large gap with a low validation
    # score means overfitting; a small gap with a low score means underfitting.
    if final_gap > 0.1 and final_test_score < 0.7:
        print("🔴 诊断: 明显过拟合")
        print("💡 建议: 增加正则化、减少模型复杂度、增加数据")
    elif final_gap < 0.05 and final_test_score < 0.6:
        print("🔵 诊断: 可能欠拟合")
        print("💡 建议: 增加模型复杂度、增加特征、减少正则化")
    elif final_gap < 0.1 and final_test_score > 0.7:
        print("🟢 诊断: 泛化能力良好")
    else:
        print("🟡 诊断: 轻微过拟合")
        print("💡 建议: 轻微调整正则化参数")


