import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.metrics import classification_report, confusion_matrix, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LinearRegression



# Generic helper: render a confusion matrix as an annotated heatmap.
def plot_confusion_matrix(conf_matrix, title, label_mapping):
    """Plot *conf_matrix* as an integer-annotated heatmap.

    Args:
        conf_matrix: square array-like confusion matrix (e.g. output of
            sklearn.metrics.confusion_matrix).
        title: figure title.
        label_mapping: dict mapping class index -> display name; its values
            (in insertion order) label both axes.
    """
    # Materialize the dict view into a list: matplotlib/seaborn expect a
    # list-like of tick labels, and passing a dict_values view directly
    # is fragile across versions.
    tick_labels = list(label_mapping.values())
    plt.figure(figsize=(8, 6))
    sns.heatmap(conf_matrix, annot=True, fmt="d", cmap='coolwarm',
                xticklabels=tick_labels, yticklabels=tick_labels)
    # NOTE(review): `my_font` is not defined anywhere in this file —
    # presumably a FontProperties object for CJK text created elsewhere;
    # confirm it exists before running.
    plt.title(title, fontproperties=my_font)
    plt.xlabel('预测值', fontproperties=my_font)
    plt.ylabel('真实值', fontproperties=my_font)
    plt.show()

# Generic helper: render a dict-form classification report as a heatmap.
def plot_classification_report(report, title):
    """Visualize a classification report (as returned by
    sklearn's classification_report with output_dict=True)."""
    report_df = pd.DataFrame(report).transpose()
    # Drop the final summary row, then transpose so metrics run down the rows.
    body = report_df.iloc[:-1, :].T
    plt.figure(figsize=(10, 6))
    sns.heatmap(body, annot=True, cmap='coolwarm')
    # NOTE(review): `my_font` is expected to be a globally defined
    # FontProperties for CJK titles — not visible in this file; confirm.
    plt.title(title, fontproperties=my_font)
    plt.show()

# Generic train-and-evaluate helper for sklearn-style classifiers.
def train_and_evaluate(model, X_train, y_train, X_test, y_test, labels=(0, 1, 2)):
    """Fit *model* on the training split and evaluate it on the test split.

    Args:
        model: any estimator exposing fit/predict (sklearn-style API).
        X_train, y_train: training features and labels.
        X_test, y_test: test features and labels.
        labels: class labels fixing the confusion-matrix row/column order.
            Defaults to (0, 1, 2), matching this project's three classes,
            so existing callers are unaffected.

    Returns:
        (report, conf_matrix): the classification report as a dict and the
        confusion matrix ordered by *labels*.
    """
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    # zero_division=1 avoids warnings/NaN metrics when a class is never predicted.
    report = classification_report(y_test, y_pred, output_dict=True, zero_division=1)

    # Pin the label order explicitly so matrices from different models align.
    conf_matrix = confusion_matrix(y_test, y_pred, labels=list(labels))

    return report, conf_matrix

# Build the four classifiers and evaluate each one on the given splits.
def initialize_and_train_models(X_train_scaled, y_train, X_test_scaled, y_test):
    """Train SVM / Naive Bayes / XGBoost / C4.5 classifiers and return
    their classification reports and confusion matrices, keyed by
    display name."""
    models = {
        '支持向量机': SVC(kernel='rbf', probability=True),
        '朴素贝叶斯': GaussianNB(),
        'XGBoost': XGBClassifier(use_label_encoder=False, eval_metric='logloss'),
        'C4.5 决策树': DecisionTreeClassifier(criterion='entropy'),
    }

    reports = {}
    conf_matrices = {}

    # Fit each model and collect its report + confusion matrix.
    for name, clf in models.items():
        rep, cm = train_and_evaluate(clf, X_train_scaled, y_train, X_test_scaled, y_test)
        reports[name] = rep
        conf_matrices[name] = cm

    return reports, conf_matrices

# Fit a linear-regression baseline and report its test-set MSE.
def train_linear_regression(X_train_scaled, y_train, X_test_scaled, y_test):
    """Train LinearRegression on the scaled splits, print the test-set
    mean squared error, and return it."""
    regressor = LinearRegression().fit(X_train_scaled, y_train)
    predictions = regressor.predict(X_test_scaled)
    mse = mean_squared_error(y_test, predictions)
    print(f"回归模型均方误差: {mse}")
    return mse

# Learning-curve style plot: test accuracy vs. training-set fraction.
def plot_train_size_vs_accuracy(train_sizes, X, y, X_test_scaled, y_test, models):
    """For each fraction in *train_sizes*, retrain every model on that
    fraction of (X, y) and plot its accuracy on the fixed test set.

    Args:
        train_sizes: iterable of fractions in (0, 1) passed to train_test_split.
        X, y: full (unscaled) feature matrix and labels to subsample from.
        X_test_scaled: pre-scaled, fixed test features.
        y_test: test labels.
        models: dict of display name -> unfitted estimator.
    """
    accuracies = {model_name: [] for model_name in models}

    for train_size in train_sizes:
        X_train_part, _, y_train_part, _ = train_test_split(
            X, y, train_size=train_size, random_state=42)
        # BUG FIX: the original referenced an undefined global `scaler`
        # (NameError at runtime). Fit a fresh scaler on this training subset.
        # NOTE(review): X_test_scaled was scaled elsewhere with a different
        # scaler; for fully consistent scaling the raw test features should
        # be re-transformed with this subset's scaler — confirm with caller.
        part_scaler = StandardScaler()
        X_train_part_scaled = part_scaler.fit_transform(X_train_part)

        # Train each model on the subset and record its test accuracy.
        for model_name, model in models.items():
            model.fit(X_train_part_scaled, y_train_part)
            accuracies[model_name].append(model.score(X_test_scaled, y_test))

    # One accuracy curve per model.
    plt.figure(figsize=(10, 6))
    for model_name, accuracy in accuracies.items():
        plt.plot(train_sizes, accuracy, label=model_name)
    # NOTE(review): `my_font` is not defined in this file — presumably a
    # FontProperties for CJK labels set up elsewhere; confirm.
    plt.title('不同训练集占比下的准确率对比', fontproperties=my_font)
    plt.xlabel('训练集占比', fontproperties=my_font)
    plt.ylabel('准确率', fontproperties=my_font)
    plt.legend(prop=my_font)
    plt.show()

# Main entry point: train, evaluate and visualize all models.
# NOTE(review): X_train_scaled, y_train, X_test_scaled, y_test, X and y are
# never defined in this file — the data loading / scaling step that produces
# them must live elsewhere (or is missing); confirm before running, otherwise
# this block raises NameError immediately.
if __name__ == "__main__":
    # Train and evaluate the four classifiers on the pre-scaled splits.
    reports, conf_matrices = initialize_and_train_models(X_train_scaled, y_train, X_test_scaled, y_test)

    # Visualize each model's classification report and confusion matrix.
    for model_name, report in reports.items():
        plot_classification_report(report, f'{model_name} 分类报告')
        plot_confusion_matrix(conf_matrices[model_name], f'{model_name} 混淆矩阵', {0: '没有申请过', 1: '申请并获得', 2: '申请未获得'})
    
    # Linear-regression baseline (prints the test-set MSE).
    train_linear_regression(X_train_scaled, y_train, X_test_scaled, y_test)
    
    # Accuracy vs. training-set-fraction curves, using fresh model instances
    # so results are not affected by the already-fitted models above.
    train_sizes = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    models = {
        '支持向量机': SVC(kernel='rbf', probability=True),
        '朴素贝叶斯': GaussianNB(),
        'XGBoost': XGBClassifier(use_label_encoder=False, eval_metric='logloss'),
        'C4.5 决策树': DecisionTreeClassifier(criterion='entropy')
    }
    plot_train_size_vs_accuracy(train_sizes, X, y, X_test_scaled, y_test, models)
