import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.neighbors import KNeighborsClassifier as KNN


# Load the dataset
def read_file(file_path):
    """Read a CSV file from *file_path* and return it as a DataFrame."""
    return pd.read_csv(file_path)


# Data preprocessing
def data_preprocessing(df):
    """Split the raw churn DataFrame into a feature matrix and label vector.

    Returns
    -------
    X : float64 ndarray of shape (n_samples, n_features)
    y : ndarray where 'Churn?' == 'True.' maps to 1.0, anything else to 0
    """
    # Encode the string target as a binary variable
    y = np.where(df['Churn?'] == 'True.', 1.0, 0)
    # Identifier-like columns and the target carry no feature signal
    features = df.drop(['State', 'Area Code', 'Phone', 'Churn?'], axis=1)
    # Map the yes/no plan columns to booleans (True iff the value is 'yes')
    for col in ["Int'l Plan", "VMail Plan"]:
        features[col] = features[col] == 'yes'
    X = features.values.astype(np.float64)
    return X, y


# Feature standardization
def standardization(X):
    """Return X rescaled column-wise to zero mean and unit variance."""
    return StandardScaler().fit_transform(X)


# K-fold cross-validated out-of-fold predictions
def run_cv(X, y, clf_class, *, n_splits=5, **kwargs):
    """Fit a fresh *clf_class* on each training fold and return out-of-fold
    predictions for every sample.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,)
    clf_class : estimator class exposing fit/predict
    n_splits : keyword-only number of folds (default 5, the original
        hard-coded value, so existing callers are unaffected)
    **kwargs : forwarded verbatim to the estimator constructor

    Returns
    -------
    ndarray of per-sample predictions, each produced while that sample
    was held out of training.
    """
    kf = KFold(n_splits=n_splits, shuffle=True)
    # Same dtype/shape as y; every slot is overwritten exactly once below
    # because the folds partition the index range.
    y_pred = y.copy()
    for train_index, test_index in kf.split(X):
        clf = clf_class(**kwargs)
        clf.fit(X[train_index], y[train_index])
        y_pred[test_index] = clf.predict(X[test_index])
    return y_pred


# Cross-validated class-probability estimates
def run_prob_cv(X, y, clf_class, **kwargs):
    """Return out-of-fold `predict_proba` estimates under 5-fold CV.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,)
    clf_class : estimator class exposing fit/predict_proba
    **kwargs : forwarded verbatim to the estimator constructor

    Returns
    -------
    ndarray of shape (n_samples, n_classes) with one probability row per
    sample, produced while that sample was held out of training.
    """
    kf = KFold(n_splits=5, shuffle=True)
    # Derive the class count from the labels instead of hard-coding 2;
    # identical result for the binary churn data this script feeds in.
    n_classes = np.unique(y).size
    y_prob = np.zeros((len(y), n_classes))
    for train_index, test_index in kf.split(X):
        clf = clf_class(**kwargs)
        clf.fit(X[train_index], y[train_index])
        y_prob[test_index] = clf.predict_proba(X[test_index])
    return y_prob


# Visualization
def visualization(counts):
    """Show two charts from the probability-comparison table *counts*:
    a line chart of true vs. predicted probability, then a horizontal
    bar chart of how often each predicted probability occurred.
    """
    plt.rcParams['font.sans-serif'] = ['SimSun']  # CJK-capable font for labels
    # Line chart comparing the two probability series per bucket
    buckets = counts.index
    plt.plot(buckets, counts['true_prob'], label='真实概率（true_prob）', marker='o')
    plt.plot(buckets, counts['pred_prob'], label='预测概率（pred_prob）', marker='s')
    plt.ylabel('概率值')
    plt.title('真实概率与预测概率比较')
    plt.legend()  # legend distinguishes the two lines
    plt.grid(True)  # grid makes the chart easier to read
    plt.show()
    # Horizontal bar chart: frequency of each predicted probability
    plt.barh(y=counts['pred_prob'], width=counts['count'], height=0.05,
             color='blue', edgecolor='black')
    plt.yticks(np.arange(0, 1.1, 0.1))
    plt.xlim(0, 2000)
    plt.title('不同预测概率频数')
    plt.xlabel('频数count')
    plt.ylabel('概率')
    plt.grid(axis='x', linestyle='--', alpha=0.7)
    plt.show()


# Main entry point
def main():
    """Run the churn pipeline: load and preprocess the data, report
    cross-validated accuracy for three models, then compare predicted
    churn probabilities against observed churn rates.
    """
    df = read_file('./data/churn.csv')
    X, y = data_preprocessing(df)
    X = standardization(X)
    # Evaluate each model exactly once. (The original called run_cv twice
    # per model: three times with the result discarded, then again inside
    # each print — doubling the training cost for no benefit.)
    print("准确率结果如下：")
    print(f"支持向量机:{accuracy_score(y, run_cv(X, y, SVC)) * 100:.2f}%")
    print(f"随机森林:{accuracy_score(y, run_cv(X, y, RF)) * 100:.2f}%")
    print(f"K近邻:{accuracy_score(y, run_cv(X, y, KNN)) * 100:.2f}%")
    # Out-of-fold churn probabilities from a small random forest
    pred_prob = run_prob_cv(X, y, RF, n_estimators=10)
    pred_churn = pred_prob[:, 1]
    is_churn = y == 1
    # pd.value_counts(...) is deprecated (removed in pandas 3.0);
    # the Series method is the supported spelling.
    counts = pd.Series(pred_churn).value_counts()
    # Observed churn rate among samples sharing each predicted probability
    true_prob = {}
    for prob in counts.index:
        true_prob[prob] = np.mean(is_churn[pred_churn == prob])
    # Convert once, after the loop (the original converted inside the loop
    # on the first iteration and only kept working by accident).
    true_prob = pd.Series(true_prob)
    counts = pd.concat([counts, true_prob], axis=1).reset_index()
    counts.columns = ['pred_prob', 'count', 'true_prob']
    counts = counts.sort_values(by='pred_prob', ascending=True).reset_index(drop=True)
    visualization(counts)


# Run the full pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()
