import numpy as np
from scipy.stats import pearsonr
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier

# Sample data: two perfectly linearly related sequences (y = x + 1).
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
# Pearson correlation coefficient (tests whether two samples are linearly
# related).  The name keeps the original "person" misspelling so existing
# callers continue to work.
def person_correlation_coefficient(x, y):
    """Return the Pearson correlation coefficient of samples ``x`` and ``y``.

    Uses the population covariance and population standard deviations
    (ddof=0 throughout), so the result matches ``np.corrcoef(x, y)[0, 1]``.
    """
    dev_x = x - np.mean(x)
    dev_y = y - np.mean(y)
    covariance = np.mean(dev_x * dev_y)
    return covariance / (np.std(x) * np.std(y))
# Cross-check the hand-rolled coefficient against library implementations.
print(person_correlation_coefficient(x, y))
print(pearsonr(x, y))  # SciPy returns (coefficient, two-sided p-value)

df_xy = pd.DataFrame(dict(x=x, y=y))
print(df_xy.corr())  # full pairwise Pearson matrix
print(df_xy[['x']].corrwith(df_xy['y']))  # column-wise, x vs y
print(df_xy[['y']].corrwith(df_xy['x']))  # and the symmetric direction

print(np.cov(x, y, bias=True))  # population covariance matrix (ddof=0)
print(np.corrcoef(x, y))

# Spearman rank correlation (depends only on the ordering of the values).
print(df_xy.corr(method='spearman'))
# Remove multicollinearity: for every pair of columns whose pairwise Pearson
# correlation exceeds 0.7, drop the later column of the pair.
# NOTE(review): the original code iterated ``data.index()`` (an Index object
# is not callable -> TypeError), compared raw cell values of ``data`` instead
# of correlations, and — because a correlation matrix is symmetric — would
# have queued BOTH columns of each correlated pair for deletion.
# Assumes ``data`` (defined elsewhere in this file) is the feature DataFrame
# whose correlations should be inspected — TODO confirm against the caller.
corr_matrix = data.corr()
cols_to_del = []
for pos, row_label in enumerate(corr_matrix.index):
    # Scan only the upper triangle so each pair is considered once and
    # one member of every correlated pair survives.
    for col_label in corr_matrix.columns[pos + 1:]:
        if corr_matrix.loc[row_label, col_label] > 0.7 and col_label not in cols_to_del:
            cols_to_del.append(col_label)
data.drop(columns=cols_to_del, inplace=True)
# Extract the most important predictors of churn with two complementary
# models: signed logistic-regression coefficients and impurity-based
# random-forest importances.
# NOTE(review): 'cunstomerID' looks like a typo for 'customerID', but it is a
# runtime column name that must match the actual data — left untouched here;
# verify against the dataset's schema.
x_data = data.drop(columns=['cunstomerID', 'Churn'])
y_data = data['Churn']

# max_iter raised from the default 100: lbfgs routinely fails to converge on
# churn-style feature sets, leaving the coefficients unreliable.
logst = LogisticRegression(max_iter=1000)
logst.fit(x_data, y_data)
rdt = RandomForestClassifier()
rdt.fit(x_data, y_data)

# (feature, weight) pairs for inspection; zip already yields 2-tuples, so no
# comprehension is needed.
features_coef = list(zip(x_data.columns, logst.coef_[0]))
features_importance = list(zip(x_data.columns, rdt.feature_importances_))

# prefit=True wraps the already-fitted estimators instead of refitting them.
# selected1 (coefficient-based) is built for comparison; only the
# forest-based selector drives the final column list below.
selected1 = SelectFromModel(estimator=logst, prefit=True, threshold=0.1)
selected2 = SelectFromModel(estimator=rdt, prefit=True, threshold=0.015)

# Columns whose forest importance clears the 0.015 threshold.
cols_selected = [col for col, keep in zip(x_data.columns, selected2.get_support()) if keep]
print(cols_selected)
