import numpy as np
import pandas as pd
import warnings
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression as LR
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.ensemble import AdaBoostClassifier as Adaboost
from xgboost import XGBClassifier as XGB
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.neighbors import NearestNeighbors

# Suppress library warnings for cleaner output
warnings.filterwarnings('ignore')

# Load the dataset and show a preview
pd.set_option('display.max_columns', None)  # show every column when printing
data = pd.read_csv('./Telco-Customer-Churn.csv')
print(data.head(5))

# Count fully duplicated rows
dupNum = data.duplicated().sum()
print(f"数据集中有 {dupNum} 条重复值")

# --- Missing-value handling -------------------------------------------------
# Check each column for missing values
print(data.isnull().any())

# 'TotalCharges' is stored as text; blank strings mark the missing entries
print(data[data['TotalCharges'] == ' '])

# Convert 'TotalCharges' to numeric; blanks become NaN
data['TotalCharges'] = pd.to_numeric(data['TotalCharges'], errors='coerce')

print(f"此时 TotalCharges 是否已经转换为浮点型： {data['TotalCharges'].dtype == 'float'}")
print(f"此时 TotalCharges 存在 {data['TotalCharges'].isnull().sum()} 行缺失样本。")

# BUG FIX: the original code filled the NaNs with 0 first, which made the
# subsequent MonthlyCharges imputation dead code (there was nothing left to
# fill).  The missing TotalCharges rows are the tenure == 0 customers (shown
# by the inspection print below), so impute with MonthlyCharges as the
# original comments intended.
data['TotalCharges'] = data['TotalCharges'].fillna(data['MonthlyCharges'])
print(f"采用填充方法后还存在 {data['TotalCharges'].isnull().sum()} 行缺失样本。")
print(data[data['tenure'] == 0][['MonthlyCharges', 'TotalCharges']])  # inspect the imputed rows

# Summary statistics of the numeric columns
print(data.describe())

# Box plots of the numeric features to screen for outliers
fig, axes = plt.subplots(3, 1, figsize=(15, 6))
flier_style = {"marker": "o", "markerfacecolor": "steelblue"}

for ax, col in zip(axes, ['tenure', 'MonthlyCharges', 'TotalCharges']):
    ax.boxplot(data[col], vert=False, showmeans=True, flierprops=flier_style)
    ax.set_title(col)

plt.tight_layout(pad=1.5)
plt.show()

# --- Class-imbalance analysis -----------------------------------------------
p = data['Churn'].value_counts()
plt.figure(figsize=(10, 6))
# BUG FIX: labels were hard-coded as ['No', 'Yes'], which silently mislabels
# the slices if the value_counts ordering ever differs; use the actual index.
patches, l_text, p_text = plt.pie(p, labels=p.index, autopct='%1.2f%%',
                                  explode=(0, 0.1))

# Enlarge the percentage and label fonts
for t in p_text:
    t.set_size(15)
for t in l_text:
    t.set_size(15)

plt.show()

# Churn broken down by the basic demographic features
baseCols = ['gender', 'SeniorCitizen', 'Partner', 'Dependents']

for col in baseCols:
    cnt = pd.crosstab(data[col], data['Churn'])
    cnt.plot.bar(stacked=True)
    plt.show()

# --- Churn rate as a function of tenure -------------------------------------
# BUG FIX: the original mutated a slice view of `data` (SettingWithCopy,
# hidden by the global warnings filter) — take an explicit copy.
groupDf = data[['tenure', 'Churn']].copy()
groupDf['Churn'] = groupDf['Churn'].map({'Yes': 1, 'No': 0})
# Mean of the 0/1 churn flag per tenure is the churn rate directly,
# replacing the equivalent sum()/count() two-pass computation.
pctDf = groupDf.groupby(['tenure'])['Churn'].mean().reset_index()

plt.figure(figsize=(10, 5))
plt.plot(pctDf['tenure'], pctDf['Churn'], label='Churn percentage')
plt.legend()
plt.show()

# Standardize the numeric features to zero mean / unit variance.
# StandardScaler scales each column independently, so a single fit over all
# three columns is equivalent to three separate per-column fits.
scaler = StandardScaler()
numericCols = ['tenure', 'MonthlyCharges', 'TotalCharges']
data[numericCols] = scaler.fit_transform(data[numericCols])

print(data[['tenure', 'MonthlyCharges', 'TotalCharges']].head())

# --- Categorical encoding ---------------------------------------------------
# Binary Yes/No columns among positions 3..16 of the raw column order,
# excluding the multi-valued or numeric ones listed in the drop().
# NOTE(review): this relies on column POSITION in the CSV — confirm the file
# layout before reusing.
encodeCols = list(data.columns[3:17].drop(
    ['tenure', 'PhoneService', 'InternetService', 'StreamingTV', 'StreamingMovies', 'Contract']))
for col in encodeCols:
    # Values outside {'Yes', 'No'} (e.g. 'No internet service',
    # 'No phone service') map to NaN here and are removed by the dropna()
    # below — NOTE(review): confirm that this row loss is intended.
    data[col] = data[col].map({'Yes': 1, 'No': 0})

# Encode the target variable
data['Churn'] = data['Churn'].map({'Yes': 1, 'No': 0})

# One-hot encode the multi-valued categorical features
onehotCols = ['InternetService', 'Contract', 'PaymentMethod']
churnDf = data['Churn'].to_frame()
featureDf = data.drop(['Churn'], axis=1)

for col in onehotCols:
    onehotDf = pd.get_dummies(featureDf[col], prefix=col)
    featureDf = pd.concat([featureDf, onehotDf], axis=1)

# Reassemble with 'Churn' as the last column, then drop the raw categoricals
data = pd.concat([featureDf, churnDf], axis=1)
data = data.drop(onehotCols, axis=1)

# Drop the identifier and features judged uninformative
data = data.drop(['customerID', 'gender', 'PhoneService', 'StreamingTV', 'StreamingMovies'], axis=1)
data = data.dropna()  # removes the rows that became NaN during the Yes/No mapping above

print(data.head(10))

# --- Correlation among the numeric features ---------------------------------
nu_fea = data[['tenure', 'MonthlyCharges', 'TotalCharges']]
# FIX: the original named this `pearson_mat` although method='spearman'
# computes Spearman rank correlation — renamed to avoid misleading readers.
corr_mat = data[nu_fea.columns].corr(method='spearman')

plt.figure(figsize=(8, 8))
sns.heatmap(corr_mat, square=True, annot=True, cmap="YlGnBu")
plt.show()


def kFold_cv(X, y, classifier, **kwargs):
    """Return out-of-fold predictions from 5-fold cross-validation.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Feature matrix.
    y : ndarray of shape (n_samples,)
        Labels (used only for its length when sizing the output).
    classifier : callable
        Estimator class exposing fit/predict (e.g. a sklearn classifier).
    **kwargs
        Forwarded to the classifier constructor.

    Returns
    -------
    ndarray of shape (n_samples,)
        Each sample's prediction, produced by the model that was trained
        on the folds not containing it.
    """
    # CONSISTENCY FIX: pin random_state=0 to match prob_cv below, so the
    # folds (and hence the reported scores) are reproducible across runs.
    kf = KFold(n_splits=5, shuffle=True, random_state=0)
    y_pred = np.zeros(len(y))

    for train_index, test_index in kf.split(X):
        X_train = X[train_index]
        X_test = X[test_index]
        y_train = y[train_index]
        clf = classifier(**kwargs)  # fresh estimator per fold
        clf.fit(X_train, y_train)
        y_pred[test_index] = clf.predict(X_test)

    return y_pred


# --- Model evaluation with 5-fold cross-validation --------------------------
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

lr_pred = kFold_cv(X, y, LR)
svc_pred = kFold_cv(X, y, SVC)
rf_pred = kFold_cv(X, y, RF)
ada_pred = kFold_cv(X, y, Adaboost)
xgb_pred = kFold_cv(X, y, XGB)

# Collect recall / precision / F1 per model.
# BUG FIX: the original assigned pd.Series via .iloc[:, i] into a 0-row
# DataFrame, which aligns to nothing on current pandas and then fails when
# a 3-label index is attached; build the frame from a dict instead.
pred = [lr_pred, svc_pred, rf_pred, ada_pred, xgb_pred]
models = ['LR', 'SVC', 'RandomForest', 'AdaBoost', 'XGBoost']
scoreDf = pd.DataFrame(
    {name: [recall_score(y, yp), precision_score(y, yp), f1_score(y, yp)]
     for name, yp in zip(models, pred)},
    index=['Recall', 'Precision', 'F1-score'])
print(scoreDf)

# --- Feature importance from a random forest --------------------------------
kf = KFold(n_splits=5, shuffle=True, random_state=0)
y_pred = np.zeros(len(y))
clf = RF()

for train_index, test_index in kf.split(X):
    X_train = X[train_index]
    X_test = X[test_index]
    y_train = y[train_index]
    clf.fit(X_train, y_train)
    y_pred[test_index] = clf.predict(X_test)

# NOTE: clf holds the model fitted on the LAST fold only, so these
# importances reflect that fold's training data rather than a CV average.
feature_importances = pd.DataFrame(clf.feature_importances_,
                                   index=data.columns.drop(['Churn']),
                                   columns=['importance']).sort_values('importance', ascending=False)

# Plot the top-10 features.
# BUG FIX: plt.figure() followed by DataFrame.plot.bar() opened an extra,
# empty figure; pass figsize to plot.bar and title the returned axes instead.
ax = feature_importances.head(10).plot.bar(figsize=(10, 6))
ax.set_title("Feature Importance")
plt.show()

# Outlier screening: distance from each sample to its nearest neighbor
neighbor_model = NearestNeighbors(n_neighbors=2, algorithm='ball_tree')
neighbor_model.fit(X)
distances, indices = neighbor_model.kneighbors(X)

# Column 0 is the distance to the point itself; column 1 is the closest
# other sample — histogram that.
plt.figure(figsize=(10, 6))
plt.hist(distances[:, 1], bins=50)
plt.title("Distances to Nearest Neighbors")
plt.show()


from sklearn.linear_model import LogisticRegression as LR  # Logistic Regression
from sklearn.svm import SVC  # Support Vector Machine
from sklearn.ensemble import RandomForestClassifier as RF  # Random Forest
from sklearn.ensemble import AdaBoostClassifier as Adaboost  # AdaBoost
from xgboost import XGBClassifier as XGB  # XGBoost

# --- Re-run the cross-validated predictions (duplicated notebook cell) ------
# CLEANUP: removed the dead commented-out `.as_matrix()` line (API removed
# from pandas) and the redundant `.iloc[:, :]` no-op slice.
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

lr_pred = kFold_cv(X, y, LR)
svc_pred = kFold_cv(X, y, SVC)
rf_pred = kFold_cv(X, y, RF)
ada_pred = kFold_cv(X, y, Adaboost)
xgb_pred = kFold_cv(X, y, XGB)

from sklearn.metrics import precision_score, recall_score, f1_score

# Duplicated evaluation cell: recall / precision / F1 per model.
# BUG FIX: the original assigned pd.Series via .iloc[:, i] into a 0-row
# DataFrame, which aligns to nothing on current pandas and then fails when
# the 3-label index is attached; build the frame from a dict instead.
pred = [lr_pred, svc_pred, rf_pred, ada_pred, xgb_pred]
models = ['LR', 'SVC', 'RandomForest', 'AdaBoost', 'XGBoost']
scoreDf = pd.DataFrame(
    {name: [recall_score(y, yp), precision_score(y, yp), f1_score(y, yp)]
     for name, yp in zip(models, pred)},
    index=['Recall', 'Precision', 'F1-score'])
scoreDf  # displays only in a notebook; no effect when run as a script


# Duplicated cell: refit a random forest with out-of-fold predictions and
# rank the features by importance.
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

kf = KFold(n_splits=5, shuffle=True, random_state=0)
y_pred = np.zeros(len(y))
clf = RF()

for train_index, test_index in kf.split(X):
    X_train, X_test = X[train_index], X[test_index]
    y_train = y[train_index]
    clf.fit(X_train, y_train)
    y_pred[test_index] = clf.predict(X_test)

# The model from the final fold supplies the importances
feature_importances = pd.DataFrame(
    clf.feature_importances_,
    index=data.columns.drop(['Churn']),
    columns=['importance'],
).sort_values('importance', ascending=False)
feature_importances  # displays only in a notebook


def prob_cv(X, y, classifier, **kwargs):
    """Return out-of-fold positive-class probabilities via 5-fold CV.

    Same scheme as kFold_cv, but records predict_proba's class-1 column
    instead of hard labels; folds are reproducible (random_state=0).
    """
    folds = KFold(n_splits=5, shuffle=True, random_state=0)
    probs = np.zeros(len(y))

    for fit_idx, hold_idx in folds.split(X):
        model = classifier(**kwargs)  # fresh estimator per fold
        model.fit(X[fit_idx], y[fit_idx])
        probs[hold_idx] = model.predict_proba(X[hold_idx])[:, 1]

    return probs


# Out-of-fold churn probabilities from a random forest, rounded to one
# decimal place so they can be grouped into probability buckets.
prob = np.round(prob_cv(X, y, RF), 1)

df1 = pd.concat([pd.DataFrame(prob), pd.DataFrame(y)], axis=1)
df1.columns = ['prob', 'churn']

df1 = df1[:7043]  # cap at the dataset's original row count
df1.head(10)  # displays only in a notebook


# Calibration table: per predicted-probability bucket, the customer count
# and the observed churn rate.
group = df1.groupby(['prob'])
cnt = group.count()
true_prob = group.sum() / group.count()

df2 = pd.concat([cnt, true_prob], axis=1)
df2 = df2.reset_index()
df2.columns = ['prob', 'cnt', 'true_prob']

print(df2)
