from random import randint, uniform
import numpy as np
import pandas as pd
from nltk import accuracy
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split, RandomizedSearchCV, learning_curve, cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, accuracy_score
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree
from sklearn.preprocessing import StandardScaler

# Configure matplotlib so Chinese labels and minus signs render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese characters
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly

# Load the Titanic training data and keep three predictive features.
df = pd.read_csv('train.csv')
x = df[['Age', 'Pclass', 'Sex']].copy()
y = df['Survived']
# BUG FIX: the original call `x.dropna(axis=0, how='any')` discarded its
# result, so NaN ages survived into training and the estimators would fail.
# Assign the result and keep the target aligned with the remaining rows.
x = x.dropna(axis=0, how='any')
y = y.loc[x.index]
# One-hot encode 'Sex'; keep the male indicator under the original name.
x = pd.get_dummies(x)
x.rename(columns={'Sex_male': 'Sex'}, inplace=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# Standardize features; fit on the training split only to avoid leakage.
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Candidate estimators and the hyper-parameter grids sampled by the
# randomized search below. All three models share the same grid shape.
_param_grid = {
    'max_depth': [3, 5, 7, 10],
    'min_samples_leaf': list(range(2, 11)),
    'min_samples_split': list(range(3, 11)),
}
model = {
    display_name: {'model': estimator, 'params': dict(_param_grid)}
    for display_name, estimator in [
        ('Decision Tree', DecisionTreeClassifier()),
        ('Random Forest', RandomForestClassifier()),
        ('Gradient Boosting', GradientBoostingClassifier()),
    ]
}
# Tune each candidate with randomized search and record, per model name,
# the refit best estimator and its best cross-validation score.
best_models = {}
best_scores = {}
for name, config in model.items():
    print(f"\n正在优化 {name}...")
    search = RandomizedSearchCV(
        config['model'],
        config['params'],
        random_state=0,  # make parameter sampling reproducible across runs
    )
    search.fit(x_train, y_train)
    best_models[name] = search.best_estimator_
    best_scores[name] = search.best_score_
    # The original printed the same score twice with two labels; keep the
    # accurate one (it is a cross-validation score, not a test score).
    print(f"{name} 最佳交叉验证分数: {search.best_score_:.4f}")

# Select the model with the highest cross-validation score.
best_model_name = max(best_scores, key=best_scores.get)
best_model = best_models[best_model_name]
print(f"\n=== 最终选择的最佳模型: {best_model_name} ===")
y_pred = best_model.predict(x_test)
# BUG FIX: the original formatted the imported nltk `accuracy` *function*
# with `:.4f` (a TypeError at runtime) instead of computing test accuracy.
test_accuracy = accuracy_score(y_test, y_pred)
print(f"测试集准确率: {test_accuracy:.4f}")
print("\n分类报告:")
print(classification_report(y_test, y_pred))

print("\n=== 所有模型测试集表现对比 ===")
# Use a distinct loop variable so the `model` config dict is not shadowed.
for name, fitted in best_models.items():
    y_pred_temp = fitted.predict(x_test)
    test_acc = accuracy_score(y_test, y_pred_temp)
    print(f"{name}: {test_acc:.4f} (交叉验证: {best_scores[name]:.4f})")


def plot_learning_curve_diagnosis(model, x, y, title="学习曲线诊断"):
    """Plot a 5-fold learning curve for ``model`` and print an
    overfitting/underfitting diagnosis from the final score gap.

    Parameters
    ----------
    model : scikit-learn compatible estimator (unfitted)
    x, y : feature matrix and target used for the learning curve
    title : figure title

    Side effects: creates a matplotlib figure and prints the diagnosis.
    """
    train_sizes, train_scores, test_scores = learning_curve(
        model, x, y, scoring='accuracy', cv=5)
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)

    plt.figure(figsize=(10, 10))
    plt.title(title)
    # Draw the mean curves so plt.legend() has labelled artists
    # (the original plotted nothing, so the legend was empty).
    plt.plot(train_sizes, train_mean, 'o-', color='g', label='训练得分')
    plt.plot(train_sizes, test_mean, 'o-', color='r', label='交叉验证得分')
    plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, alpha=0.2, color='g')
    # BUG FIX: the lower edge of the validation band was
    # `test_std - test_std` (always zero) instead of `test_mean - test_std`.
    plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std, alpha=0.2, color='r')
    plt.legend()
    plt.grid(True, alpha=0.2)

    # BUG FIX: compare the *final* scores. The original subtracted the
    # whole `test_mean` array, which breaks the `:.4f` formatting below
    # and makes the scalar threshold comparisons ambiguous.
    final_gap = train_mean[-1] - test_mean[-1]
    final_test_score = test_mean[-1]

    print("\n=== 学习曲线诊断结果 ===")
    print(f"最终训练集得分: {train_mean[-1]:.4f}")
    print(f"最终测试集得分: {test_mean[-1]:.4f}")
    print(f"性能差距: {final_gap:.4f}")

    # Heuristic thresholds: a large train/validation gap means variance
    # (overfitting); uniformly low scores mean bias (underfitting).
    if final_gap > 0.1 and final_test_score < 0.7:
        print("🔴 诊断: 明显过拟合")
        print("💡 建议: 增加正则化、减少模型复杂度、增加数据")
    elif final_gap < 0.05 and final_test_score < 0.6:
        print("🔵 诊断: 可能欠拟合")
        print("💡 建议: 增加模型复杂度、增加特征、减少正则化")
    elif final_gap < 0.1 and final_test_score > 0.7:
        print("🟢 诊断: 泛化能力良好")
    else:
        print("🟡 诊断: 轻微过拟合")
        print("💡 建议: 轻微调整正则化参数")
