# 1.导包
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
from matplotlib import pyplot as plt
import seaborn as sns  # seaborn基于Matplotlib的一个画图工具


# 2. Load the data
df = pd.read_csv("churn.csv", encoding='UTF-8')
# print(df.head())

# 3. Preprocessing
# 3.1 One-hot encode every categorical column.
one_hot_df = pd.get_dummies(df)
# print(one_hot_df.head())

# 3.2 Drop the redundant dummy columns: for a binary category one dummy
# fully determines the other ('gender_Female' implies 'gender_Male',
# 'Churn_Yes' implies 'Churn_No'), so keeping both adds no information.
new_df = one_hot_df.drop(['gender_Male', 'Churn_No'], axis=1)
# print(new_df)

# 3.3 Split into features and target.
# Select the target column by NAME rather than by position: the original
# code assumed 'Churn_Yes' sits at the second-to-last column, which would
# silently pick the wrong target if the CSV column order ever changes.
# (Given that assumption held, this produces identical x_df/y_df.)
y_df = new_df['Churn_Yes']                  # target: 1 = customer churned
x_df = new_df.drop(columns=['Churn_Yes'])   # features: every other column
# print(x_df)
# print(y_df.head())


# 3.4 Train/test split: hold out 20% of the rows for evaluation,
# with a fixed seed so the split is reproducible.
x_train, x_test, y_train, y_test = train_test_split(
    x_df, y_df, test_size=0.2, random_state=922
)

# 4. Feature engineering
# 4.1 Standardization: fit the scaler on the training split only, then
# apply that same transformation to the test split (avoids data leakage).
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)


# 5. Build the model
# Logistic regression trained with the SAG solver, capped at 100
# optimizer iterations.
model = LogisticRegression(max_iter=100, solver="sag")
model.fit(x_train, y_train)

# 5.1 (Optional) cross-validation + grid search over solver / iterations —
# kept here commented out for reference.
# param_grid = {
#     "solver": ["liblinear", "sag", "saga"],
#     "max_iter": [100, 150, 200]
# }
#
# estimator = GridSearchCV(model, param_grid=param_grid, cv=4)
# estimator.fit(x_train, y_train)
#
# print(f"输出最优参数：{estimator.best_params_}")

# 6. Evaluate the model
# 6.1 Predict labels for the held-out test set.
y_predict = model.predict(x_test)

# 6.2 Individual metrics: predicted labels versus the ground truth.
print(f"精确率：{precision_score(y_test, y_predict)}")
print(f"召回率：{recall_score(y_test, y_predict)}")
print(f"f1分数：{f1_score(y_test, y_predict)}")

# Full per-class report.
#   macro avg:    unweighted mean over classes = (positive + negative) / 2
#   weighted avg: mean weighted by each class's sample count / total samples
#   digits:       decimal places shown for every metric
print(f"分类报告评估：{classification_report(y_test, y_predict,digits=4)}")