#!/usr/bin/env python
# coding: utf-8

import pandas as pd

# Load the CSV file (GBK-encoded) into a DataFrame.
df = pd.read_csv('D:/python/assignment/USER_INFO.csv', encoding='GBK')

# Duplicate-row check: flag rows whose USER_ID already appeared earlier.
duplicate_USER_ID = df['USER_ID'].duplicated(keep='first')
num_duplicate_USER_ID = duplicate_USER_ID.sum()
print('根据用户ID查找到重复行数为：', num_duplicate_USER_ID)

# Duplicate-column check: flag column labels that occur more than once.
duplicate_column_names = df.columns.duplicated()
num_duplicate_column_names = duplicate_column_names.sum()
print('根据字段属性查找到重复列数为：', num_duplicate_column_names)



# Descriptive statistics of the training sample: per-column missing-value
# count, maximum and minimum.
# percentiles=[] keeps describe() minimal; include='all' covers
# non-numeric columns too (their max/min come back as NaN).
explore_train = df.describe(percentiles=[], include='all').T
explore_train['null'] = df.isnull().sum()  # missing values per column
explore_train = (explore_train[['null', 'max', 'min']]
                 .rename(columns={'null': '空值数',
                                  'max': '最大值',
                                  'min': '最小值'}))
# Persist the summary for the report.
explore_train.to_csv('D:/python/assignment/explore_train.csv')
print('训练样本的描述性统计分析：\n', explore_train)



# --- De-duplicate rows and handle missing values ---
# NOTE(review): the notebook original contained bare expression statements
# (df.shape, df1.shape, df1.head(5), isnull().sum(...), s_for_fill.shape)
# whose values were never used; in a script they are no-ops, so they were
# removed as dead code.
print('去重前形状为：', df.shape)

# Keep only the first row seen for each USER_ID.
df1 = df.drop_duplicates(subset='USER_ID', keep='first', inplace=False)
print('去重后形状为：', df1.shape)

# df1.to_csv('D:/python/assignment/filter_userid.csv', index=False)

# Drop rows missing any of these key columns.
df2 = df1.dropna(subset=['AGREE_EXP_DATE', 'VIP_LVL',
                         'CONSTELLATION_DESC', 'OS_DESC'], axis=0)

# Fill missing gender (CUST_SEX) with 3 — presumably an "unknown" code;
# TODO confirm against the data dictionary.
s_for_fill = df2.fillna({'CUST_SEX': 3})




# --- Remove outlier records ---
dt = s_for_fill
# A record is treated as an outlier when: tenure is negative, the account
# fee exceeds 40000, or the total call duration exceeds the sum of its
# component durations by more than 100 (units as in the source data).
outlier_mask = (
    (dt['INNET_MONTH'] < 0)
    | (dt['ACCT_FEE'] > 40000)
    | (dt['CALL_DURA'] > dt['NO_ROAM_LOCAL_CALL_DURA']
       + dt['NO_ROAM_GN_LONG_CALL_DURA']
       + dt['GN_ROAM_CALL_DURA'] + 100)
)
data_clear = dt.drop(dt[outlier_mask].index)

# Write out the fully preprocessed data set.
data_clear.to_csv('D:/python/assignment/USER_INFO_filter.csv', index=False)




import pandas as pd
import matplotlib.pyplot as plt

# Reload the cleaned data set.
data_clear = pd.read_csv('D:/python/assignment/USER_INFO_filter.csv')

# Frequency histogram of tenure (INNET_MONTH): 5 equal bins over [0, 250].
bins = [0, 50, 100, 150, 200, 250]  # x-axis tick positions
plt.figure(figsize=(10, 6))
plt.hist(data_clear['INNET_MONTH'], 5, range=[0, 250],
         alpha=0.8, edgecolor='black')
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
# NOTE(review): the x-label says “小时” (hours) but INNET_MONTH looks like a
# month count — confirm the intended unit.
plt.title('某运营商2016年3月份在网时长频率分布直方图', fontsize=20)
plt.xlabel('在网时长区间（小时）')
plt.ylabel('用户数（个）')
plt.xticks(bins)
plt.show()




import pandas as pd

# Reload the cleaned data set for the call-duration histograms.
data_clear = pd.read_csv('D:/python/assignment/USER_INFO_filter.csv')
def hours(x):
    """Convert a raw duration value to hours by dividing by 360.

    NOTE(review): the divisor 360 suggests the raw unit is 1/360 of an
    hour (10-second ticks) — confirm against the data specification.
    """
    converted = x / 360
    return converted
# Convert CALL_DURA to hours in place.
data_clear['CALL_DURA'] = data_clear['CALL_DURA'].map(hours)
# NOTE(review): the original had `data_clear['CALL_DURA'].astype('int64')`
# as a bare statement — astype returns a NEW Series, so the result was
# discarded and the line had no effect. Removed as dead code; if integer
# truncation was intended, assign the result back to the column instead.
bins = [0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000]  # x-axis ticks
# Frequency histogram of call duration: 8 equal bins over [0, 4000].
plt.figure(figsize=(10, 6))
plt.hist(data_clear['CALL_DURA'], 8, range=[0, 4000],
         alpha=0.8, edgecolor='black')
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
plt.title('某运营商2016年3月份通话时长频率分布直方图', fontsize=20)
plt.xlabel('通话时长区间(小时)')
plt.ylabel('用户数（个）')
plt.xticks(bins)
plt.show()




# Zoomed-in view of the call-duration distribution: 6 bins over [0, 600].
# NOTE(review): the original repeated the bare statement
# `data_clear['CALL_DURA'].astype('int64')` here; its result was discarded
# (astype is not in-place), so the line was a no-op and has been removed.
bins = [0, 100, 200, 300, 400, 500, 600]  # x-axis tick positions
plt.figure(figsize=(10, 8))
plt.hist(data_clear['CALL_DURA'], 6, range=[0, 600],
         alpha=0.8, edgecolor='black')
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
plt.title('某运营商2016年3月份通话时长频率分布直方图', fontsize=20)
plt.xlabel('通话时长区间(小时)')
plt.ylabel('用户数（个）')
plt.xticks(bins)
plt.show()



# --- Train/test split with stratified sampling ---
from sklearn.model_selection import StratifiedShuffleSplit
import pandas as pd
import numpy as np

selected_cols = ['USER_ID', 'INNET_MONTH', 'CREDIT_LEVEL',
                 'CALL_DURA', 'IS_LOST']
data = pd.read_csv('D:/python/assignment/USER_INFO_filter.csv')
data = np.array(data[selected_cols])

# 80/20 split, stratified on the last column (IS_LOST) so both sets keep
# the same class ratio.
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=420)
for train_index, test_index in split.split(data, data[:, -1]):
    train_set = data[train_index, :]
    test_set = data[test_index, :]
print(len(train_set), len(test_set))




# Verify the stratification: the ratio of column 4 (IS_LOST) should be
# nearly identical in the training and test portions.
train_data = pd.DataFrame(train_set)
test_data = pd.DataFrame(test_set)
for frame in (train_data, test_data):
    print(frame[4].value_counts() / len(frame))



# --- Prepare features/labels and fit a decision-tree classifier ---
# Restore column names on the split frames.
col_names = ['USER_ID', 'INNET_MONTH', 'CREDIT_LEVEL', 'CALL_DURA', 'IS_LOST']
train_data.columns = col_names
test_data.columns = col_names

# Features: drop the identifier and the label, cast to integers.
train = train_data.drop(['USER_ID', 'IS_LOST'], axis=1).astype('int64')
test = test_data.drop(['USER_ID', 'IS_LOST'], axis=1).astype('int64')

# y keeps every non-ID column; only y['IS_LOST'] is used as the label below.
y = train_data.drop(['USER_ID'], axis=1).astype('int64')

# Standardise features to zero mean / unit variance.
# NOTE(review): the test set is scaled with its OWN mean/std rather than
# the training statistics — consider reusing train.mean()/train.std().
train = (train - train.mean()) / train.std()
test = (test - test.mean()) / test.std()

# Decision tree; class_weight='balanced' compensates for class imbalance.
from sklearn.tree import DecisionTreeClassifier
model_dt1 = DecisionTreeClassifier(max_leaf_nodes=16, random_state=420,
                                   class_weight='balanced').fit(train, y['IS_LOST'])

# Predict churn classes for the test set.
pre_dt = model_dt1.predict(test)

# Store predictions next to the user id. Fix: .copy() detaches the frame
# from test_data, avoiding pandas' SettingWithCopyWarning (the original
# assigned a new column into a slice of test_data).
dt_class = test_data[['USER_ID']].copy()
dt_class['class'] = pre_dt
# dt_class.to_csv('D:/python/assignment/dt_class.csv', index=False)



from sklearn.metrics import precision_score, roc_auc_score, roc_curve
from sklearn.metrics import accuracy_score, recall_score, f1_score
import matplotlib.pyplot as plt

plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly

# Ground-truth labels: drop the id column, cast to integers.
x_test = test_data.drop(['USER_ID'], axis=1).astype('int64')

# Positive-class probabilities drive the AUC / ROC computation.
pre = model_dt1.predict_proba(test)
auc = roc_auc_score(x_test['IS_LOST'], pre[:, 1])
print('AUC值为%.2f%%' % (auc * 100.0))

# Threshold-based metrics use the hard predictions from earlier.
dt_evaluate_accuracy = accuracy_score(x_test['IS_LOST'], pre_dt)
print('准确率为%.2f%%' % (dt_evaluate_accuracy * 100.0))
dt_evaluate_p = precision_score(x_test['IS_LOST'], pre_dt)
print('精确率为%.2f%%' % (dt_evaluate_p * 100.0))
dt_recall = recall_score(x_test['IS_LOST'], pre_dt)
print('召回率为%.2f%%' % (dt_recall * 100.0))
dt_f1 = f1_score(x_test['IS_LOST'], pre_dt)
print('F1分数为%.2f%%' % (dt_f1 * 100.0))

# ROC curve for the positive class.
tr_fpr, tr_tpr, tr_threasholds = roc_curve(x_test['IS_LOST'], pre[:, 1])
plt.title("ROC %s(AUC=%.4f)" % ('曲线', auc))
plt.xlabel('假正率')
plt.ylabel('真正率')
plt.plot(tr_fpr, tr_tpr)
plt.show()
# Recorded results WITHOUT standardisation:
# AUC值为88.19%
# 准确率为79.07%
# 精确率为5.65%
# 召回率为85.07%
# F1分数为10.59%




from sklearn.metrics import precision_recall_curve, precision_recall_fscore_support

# Precision/recall trade-off across decision thresholds.
precisions, recalls, thresholds = precision_recall_curve(x_test['IS_LOST'], pre[:, 1])

# Fix 1: select the figure BEFORE setting title/labels — the original called
# plt.title/xlabel/ylabel first, so the text landed on the previously
# current figure, then plt.figure(1) switched away from it.
plt.figure(1)
plt.title('精确率与召回率相关性')
plt.xlabel('查全率（recall）')
plt.ylabel('查准率（precision）')
# Fix 2: the axis labels put recall on x and precision on y, but the
# original plotted plot(precisions, recalls); swap the arguments so the
# data matches the labels (the conventional PR-curve orientation).
plt.plot(recalls, precisions)
plt.show()


# # 探测原始数据正反样本比例（正样本：0.032342； 反样本：0.967658）
# # 办法（调权重：class_weight="balanced"）



# --- Second data set: stratified split of the screened user file ---
# (The original imported StratifiedShuffleSplit twice; once is enough —
# a repeated import is a no-op.)
from sklearn.model_selection import StratifiedShuffleSplit
import pandas as pd
import numpy as np

selected = ['USER_ID', 'INNET_MONTH', 'CREDIT_LEVEL', 'CALL_DURA', 'IS_LOST']
data2 = pd.read_csv('D:/python/assignment/user_info_screen.csv', encoding='GBK')
data2 = np.array(data2[selected])

# 80/20 split, stratified on the last column (IS_LOST).
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(data2, data2[:, -1]):
    train1_set = data2[train_index, :]
    test1_set = data2[test_index, :]
print(len(train1_set), len(test1_set))

# Inspect the training portion: structure summary and class proportions.
train1_data = pd.DataFrame(train1_set)
train1_data.info()
print(train1_data[4].value_counts() / len(train1_data))






