import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the training data; drop the first two (non-feature) columns.
data1 = pd.read_csv('data_train.csv')
data1 = data1.iloc[:, 2:]
print(data1)
# Basic info: dtypes and non-null counts.
data1.info()
# TotalCharges loads as object dtype because some entries are blank
# (whitespace only). Locate their indices for inspection.
l1 = [len(str(v).split()) for v in data1['TotalCharges']]
l2 = [i for i in range(len(l1)) if l1[i] != 1]
print(f'含有空格的数据索引：{l2}')
# The blank rows correspond to customers with 0 months of tenure, so treat
# them as 0. pd.to_numeric(errors='coerce') turns the blanks into NaN
# regardless of the DataFrame's index (the original wrote with
# data1.loc[i, ...] using *positional* indices as labels, which is only
# correct for a default RangeIndex); fillna(0) then applies the fix and the
# column ends up float64 in one step.
data1['TotalCharges'] = pd.to_numeric(data1['TotalCharges'], errors='coerce').fillna(0.0)
# Visualize missing values: any null cell shows up as a light stripe.
import seaborn as sns
sns.heatmap(data1.isnull(), cmap='Greens_r', cbar=False)
plt.show()
# Plot the distribution of every feature on a single figure. Derive the
# grid size from the column count — the previous hard-coded 4x5 grid would
# raise an IndexError if the dataset ever had more than 20 columns.
n_grid_cols = 5
n_grid_rows = -(-len(data1.columns) // n_grid_cols)  # ceiling division
fig, axes = plt.subplots(n_grid_rows, n_grid_cols, figsize=(18, 18))
axes = np.atleast_2d(axes)
for i, column in enumerate(data1.columns):
    ax = axes[i // n_grid_cols, i % n_grid_cols]
    sns.histplot(data1[column], kde=True, ax=ax)
    ax.set_title(f'Distribution of {column}')
    ax.set_xlabel(column)
    ax.set_ylabel('Count')
# Hide any leftover axes beyond the last feature so the figure shows no
# empty frames.
for j in range(len(data1.columns), n_grid_rows * n_grid_cols):
    axes[j // n_grid_cols, j % n_grid_cols].set_visible(False)
plt.tight_layout()
plt.show()
# Integer-encode the categorical (text) columns.
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
df1 = data1.copy(deep=True)
# describe() covers only numeric columns, so everything absent from it is a
# text feature that needs encoding.
numeric_columns = set(data1.describe().columns)
text_data_features = [c for c in data1.columns if c not in numeric_columns]
# Encode each text column in place and print its code -> category mapping.
for column in text_data_features:
    df1[column] = le.fit_transform(df1[column])
    print(column, ': ', df1[column].unique(), ' = ',
          le.inverse_transform(df1[column].unique()))
colors = ['#9FE2BF', '#87CEEB']
# Compare per-feature means for churned (label == 1) vs retained customers.
label = df1[df1['label'] == 1].describe().T
not_label = df1[df1['label'] == 0].describe().T

# Draw directly on the axes returned by plt.subplots. The original created
# these axes and then ignored them, calling plt.subplot(...) which laid a
# second, overlapping set of axes on the same figure.
fig, ax = plt.subplots(1, 2, figsize=(5, 5))

sns.heatmap(label[['mean']], annot=True, cmap=colors, linewidths=0.4,
            cbar=False, fmt='.2f', ax=ax[0])
ax[0].set_title("label Customers")

sns.heatmap(not_label[['mean']], annot=True, cmap=colors, linewidths=0.4,
            cbar=False, fmt='.2f', ax=ax[1])
ax[1].set_title("Not label Customers")

fig.tight_layout(pad=0)
plt.show()
# Countplot grids comparing each categorical feature against churn.
# A single helper replaces three copy-pasted loops (customer, service, and
# billing dimensions) so the annotation logic lives in one place.
def _countplot_grid(features, nrows, ncols):
    """Draw one label-hued countplot per feature in an nrows x ncols grid,
    annotating every bar with its count, then show the figure."""
    plt.subplots(nrows=nrows, ncols=ncols, figsize=(20, 14))
    for i, feature in enumerate(features):
        plt.subplot(nrows, ncols, i + 1)
        ax = sns.countplot(x=feature, data=df1, hue="label", palette=colors)
        # Write each bar's height just above the bar, centered on it.
        for rect in ax.patches:
            ax.text(rect.get_x() + rect.get_width() / 2, rect.get_height() + 2,
                    rect.get_height(), horizontalalignment='center', fontsize=11)
        plt.title(feature + ' vs label')
    plt.show()

# Customer-demographic features.
n1 = ['gender', 'SeniorCitizen', 'Partner', 'Dependents']
_countplot_grid(n1, 2, 2)
# Service-related features.
n2 = ['PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity',
      'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV',
      'StreamingMovies']
_countplot_grid(n2, 3, 3)
# Contract / billing features.
n3 = ['Contract', 'PaperlessBilling', 'PaymentMethod']
_countplot_grid(n3, 1, 3)
# KDE plots of the numeric features, split by churn.
n4 = ['tenure', 'MonthlyCharges', 'TotalCharges']
fig = plt.subplots(nrows=1, ncols=3, figsize=(20, 7))
for i in range(3):
    plt.subplot(1, 3, i + 1)
    # fill=True replaces shade='True': `shade` is deprecated in seaborn, and
    # the original passed a *string*, which only worked because any
    # non-empty string is truthy.
    sns.kdeplot(x=n4[i], data=df1, hue="label", palette=colors, fill=True)
    # NOTE: the original iterated ax.patches here to annotate "bar heights",
    # copy-pasted from the countplot loops; a KDE has no bars, so that loop
    # was meaningless and is dropped.
    plt.title(n4[i] + ' vs label')
plt.show()
# Scale the numeric columns to [0, 1] so the correlation heatmap (and the
# models below) compare features on the same range. MinMaxScaler scales each
# feature independently, so one fit_transform over all three columns is
# equivalent to scaling them one at a time.
from sklearn.preprocessing import MinMaxScaler

mms = MinMaxScaler()
numeric_features = ['tenure', 'MonthlyCharges', 'TotalCharges']
df1[numeric_features] = mms.fit_transform(df1[numeric_features])

# Correlation heatmap over all (now numeric) columns.
plt.figure(figsize=(20, 5))
sns.heatmap(df1.corr(), cmap='coolwarm', annot=True)
plt.show()

# Remove features that showed weak correlation with the label.
df1.drop(columns=['PhoneService', 'gender', 'StreamingTV', 'StreamingMovies',
                  'MultipleLines', 'InternetService'], inplace=True)
#建模准备
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import RocCurveDisplay
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
from sklearn.model_selection import RepeatedStratifiedKFold

def model(classifier, x_train, y_train, x_test, y_test):
    """Fit *classifier*, print cross-validated and hold-out ROC AUC, and plot
    the ROC curve.

    The hold-out ROC AUC is computed from predicted probabilities when the
    classifier exposes predict_proba: roc_auc_score needs a ranking score,
    and the original passed hard 0/1 predictions, which systematically
    understates AUC (and disagrees with the plotted curve, which
    RocCurveDisplay builds from probabilities).
    """
    classifier.fit(x_train, y_train)
    # Probability of the positive class; fall back to hard predictions for
    # estimators without predict_proba.
    if hasattr(classifier, 'predict_proba'):
        scores = classifier.predict_proba(x_test)[:, 1]
    else:
        scores = classifier.predict(x_test)
    cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    print("Cross Validation Score : ", '{0:.2%}'.format(
        cross_val_score(classifier, x_train, y_train, cv=cv, scoring='roc_auc').mean()))
    print("ROC_AUC Score : ", '{0:.2%}'.format(roc_auc_score(y_test, scores)))
    RocCurveDisplay.from_estimator(classifier, x_test, y_test)
    plt.title('ROC_AUC_Plot')
    plt.show()

def model_evaluation(classifier, x_test, y_test):
    """Plot the confusion matrix — each cell annotated with its name, count,
    and percentage of all samples — and print the classification report."""
    # Predict once and reuse; the original called classifier.predict(x_test)
    # twice (once for the matrix, once for the report).
    prediction = classifier.predict(x_test)
    cm = confusion_matrix(y_test, prediction)
    names = ['True Negative', 'False Positive', 'False Negative', 'True Positive']
    counts = list(cm.flatten())
    percentages = ['{0:.2%}'.format(value) for value in cm.flatten() / np.sum(cm)]
    labels = [f'{v1}\n{v2}\n{v3}' for v1, v2, v3 in zip(names, counts, percentages)]
    labels = np.asarray(labels).reshape(2, 2)
    sns.heatmap(cm, annot=labels, cmap='Blues', fmt='')
    print(classification_report(y_test, prediction))

# 80/20 train/validation split. Select features and target by column name
# rather than hard-coded positions (the original's iloc[:, 0:13] /
# iloc[:, 13:] silently break whenever the set of dropped columns changes).
# Using a Series for the target also avoids sklearn's column-vector warning.
X = df1.drop(columns=['label'])
Y = df1['label']
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=2)

# Decision-tree baseline.
from sklearn.tree import DecisionTreeClassifier
classifier_dt = DecisionTreeClassifier(random_state=1000, max_depth=4, min_samples_leaf=1)
model(classifier_dt, x_train, y_train, x_test, y_test)
model_evaluation(classifier_dt, x_test, y_test)

# XGBoost model.
from xgboost import XGBClassifier
classifier_xgb = XGBClassifier(learning_rate=0.01, max_depth=3, n_estimators=1000)
model(classifier_xgb, x_train, y_train, x_test, y_test)
model_evaluation(classifier_xgb, x_test, y_test)