# Plotting libraries (plotly for interactive charts, matplotlib for static ones)
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt

import numpy as np
import pandas as pd
total_copy = pd.read_csv(r'./data/数据清洗/total_copy包含多重共线特征.csv')

# 4. Feature engineering
# 4.1 Feature selection
#   1. First filter with a variance threshold and mutual information.
#   2. Then apply the "embedded" method, backed by a random forest
#      (the estimator can be swapped for another model as needed).
# 4.1.1 Variance filtering
# Split off the customer-ID column and persist it on its own.
IDdf = total_copy[['客户ID']]
total_copy = total_copy.drop(columns=['客户ID'])
IDdf.to_csv(r'./data/ID.csv', index=False, encoding='utf-8')

# Variance filter: with the default threshold (0.0) only
# zero-variance (i.e. constant) features are removed.
from sklearn.feature_selection import VarianceThreshold
VT_df = VarianceThreshold().fit_transform(total_copy)
print(VT_df.shape)

# 4.1.2 Mutual-information filtering
# Labels corresponding to the training rows.
labeldf = pd.read_csv(r'./data/label.csv')
labeldf['是否流失'] = labeldf['是否流失'].astype('int')
y = labeldf['是否流失']
# Mutual-information scorer for classification targets.
from sklearn.feature_selection import mutual_info_classif as MIC
from sklearn.feature_selection import SelectKBest
# The first 150,000 rows are the training set; the remainder is the test set.
VT_df = pd.DataFrame(VT_df)
VT_train = VT_df.iloc[:150000, :]
# Estimated mutual information between each feature and the label.
result = MIC(VT_train, y, random_state=2022)
print(result)

# Keep every feature with strictly positive mutual information.
k = int((result > 0).sum())


def _seeded_mic(X, target):
    """Mutual-information scorer pinned to the same seed used for `result`.

    BUG FIX: the original passed the bare `MIC` function to SelectKBest,
    which re-ran it WITHOUT `random_state=2022`. The features actually
    selected could therefore differ from the seeded run that `k` was
    derived from. Seeding the scorer makes the selection reproducible
    and consistent with `result`.
    """
    return MIC(X, target, random_state=2022)


SK = SelectKBest(_seeded_mic, k=k)
MIC_xtrain = SK.fit_transform(VT_train, y)
# Apply the already-fitted selection to the test rows (no refit).
VT_test = VT_df.iloc[150000:, :]
MIC_xtest = SK.transform(VT_test)
print(MIC_xtrain.shape, MIC_xtest.shape)

# 4.1.3 Embedded method (SelectFromModel backed by a random forest)
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
# Training features from the mutual-information step.
feature_train = MIC_xtrain
# Labels for the training rows.
train_y = y
# Test features from the mutual-information step.
feature_test = MIC_xtest

# NOTE: the joint hyper-parameter search below (n_estimators x threshold)
# is expensive, so it is kept commented out for reference only; its result
# is summarized in the block comment that follows.
# estimators = [x for x in range(50,550,50)]
# fig = go.Figure()
# # Random-forest tuning and threshold tuning combined ------------------------------------------------
# for i in estimators:
#     # Random forest
#     feature_RFC = RandomForestClassifier(n_estimators=i,criterion = 'gini',n_jobs = -1)
#     # Scores for this n_estimators value
#     record = []
#     # Importance of each feature
#     importance =  feature_RFC.fit(feature_train, train_y).feature_importances_
#     # Evenly spaced thresholds via numpy.linspace
#     threshold = np.linspace(0, importance.max(), 10)
#     for a in threshold:
#         x_embedded=SelectFromModel(feature_RFC, threshold = a).fit_transform(feature_train, train_y)
#         score=cross_val_score(feature_RFC, x_embedded, train_y, scoring = 'roc_auc',cv = 5,error_score=np.nan).mean()
#         record.append(score)
#     # Plot with plotly
#     fig.add_trace(go.Scatter(x=threshold, y=record,mode='lines',name=i))
# fig.show()

'''
From the learning curves plotted above, the score decreases as the threshold
grows; the best score observed was 0.865788. Each n_estimators (number of
trees) reaches its maximum score at a suitable threshold.
Here n_estimators = 450 and threshold = 0.005202443 are chosen for the
final selection below.
'''

feature_RFC1 = RandomForestClassifier(n_estimators=450, criterion='gini', n_jobs=-1)
# Final embedded-method selection, fitted on the training set.
selection = SelectFromModel(feature_RFC1, threshold=0.005202443)
final_train = selection.fit_transform(feature_train, train_y)
# Apply the same fitted selection to the test set.
final_test = selection.transform(feature_test)
# Cross-validated ROC-AUC of the forest on the selected features.
final_score = cross_val_score(feature_RFC1, final_train, train_y, scoring='roc_auc', cv=5, error_score=np.nan).mean()
print(final_score)

# Persist the feature-selected training and test sets.
best_train = pd.DataFrame(final_train)
print(f'训练集维度为{best_train.shape}')
best_test = pd.DataFrame(final_test)
# BUG FIX: the original message also said 训练集 (training set) here,
# although it reports the TEST set's dimensions.
print(f'测试集维度为{best_test.shape}')
best_train.to_csv(r'./data/特征工程/best_train.csv', index=False, encoding='utf-8')
best_test.to_csv(r'./data/特征工程/best_test.csv', index=False, encoding='utf-8')

