import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import roc_auc_score, roc_curve,auc
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, classification_report
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from collections import Counter
import re
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.models import load_model
from keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
# Configure matplotlib so CJK text and the minus sign render correctly.
plt.rcParams['font.family']='Microsoft YaHei'
plt.rcParams['axes.unicode_minus'] = False

def process_grade(text, stopwords=None):
    """Remove punctuation characters from *text*.

    Parameters
    ----------
    text : str
        Raw grade/position string.
    stopwords : str, optional
        Characters to strip.  Defaults to the module-level
        ``stopwords_list`` (defined later in this script, before the
        first call) for backward compatibility.

    Returns
    -------
    str
        *text* with every character in *stopwords* removed.
    """
    if stopwords is None:
        # Late lookup preserves the original behaviour of reading the
        # global at call time.
        stopwords = stopwords_list
    # re.escape guards against characters that are special inside a
    # regex character class (the original relied on none being present).
    return re.sub('[{}]'.format(re.escape(stopwords)), "", text)

# Display options for inspecting wide frames interactively.
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 50)

# Bribery cases (case label 0)
data_sh = pd.read_excel('受贿终.xlsx')
data_sh['案件'] = 0

# Embezzlement cases (case label 1)
data_tw = pd.read_excel('贪污终.xlsx')
data_tw['案件'] = 1

# Stack the two case types and renumber rows 0..n-1.  drop=True replaces
# the original reset_index() + del df['index'] two-step.
df = pd.concat([data_sh, data_tw], axis=0).reset_index(drop=True)

"""对性别进行处理 按照名字定性别标签+剔除0"""

print(df['性别'].value_counts())
# 女性名字常用
f_name = '妍 念 倩 幂 辰 慕 理 霜 依 喻 微 紫 盼 语 音 杏 晓 葵 媛 采 乐 青 月 松 彩 碧 蓉 滢 含 馥 素 沐 白 南 容 知 艳 梨 琦 盈 筠 音 茹 静 尔 沛 娅 玉 宸 畅 韵 丹 尚 钰 桐 美 梦 璐 荷 悦 菡 曦 聪 希 黛 虞 歆 可 爽 雅 初 昕 缦 洁 迪 凌 靖 芝 忆 熙 芊 奕 帆 露 灵 霄 颖 笛 迎'
# 男性名字常用
m_name = '伟、刚、勇、毅、俊、峰、强、军、平、保、东、文、辉、力、明、永、健、世、广、志、义、兴、良、海、山、仁、波、宁、贵、福、生、龙、元、全、国、胜、学、祥、才、发、武、新、利、清、飞、彬、富、顺、信、子、杰、涛、昌、成、康、星、光、天、达、安、岩、中、茂、进、林、有、坚、和、彪、博、诚、先、敬、震、振、壮、会、思、群、豪、心、邦、承、乐、绍、功、松、善、厚、庆、磊、民、友、裕、河、哲、江、超、浩、亮、政、谦、亨、奇、固、之、轮、翰、朗、伯、宏、言、若、鸣、朋、斌、梁、栋、维、启、克、伦、翔、旭、鹏、泽、晨、辰、士、以、建、家、致、树、炎、德、行、时、泰、盛'

df['性别'] = df['性别'].astype(str)
for i in range(len(df['当事人'])):
    if df['性别'][i] == '0':
        s = list(df['当事人'][i][1:])
        if len(set(s).intersection(f_name.split(' '))) > 0:
            df['性别'][i] = '女'
for i in range(len(df['当事人'])):
    if df['性别'][i] == '0':
        s = list(df['当事人'][i][1:])
        if len(set(s).intersection(m_name.split('、'))) > 0:
            df['性别'][i] = '男'

gender = np.unique(df['性别']) #['0' '女' '男']
print(gender)
df = df.drop(df[df['性别']=='0'].index)

"""对时间进行处理"""
for index,component in enumerate(['年', '月', '日']):
    df['%s_%s' % ('裁决日期',component)] = df['裁决日期'].apply(lambda x: int(x.split(' ')[0].split('-')[index]))


"""对级别进行处理，①去除其标点符号②对于长度大于6的进行取后两个字符（因为大部分职位基本为后两字代表）"""
stopwords_list = '＂＃＄％＆＇（）＊＋，－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､\u3000、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·！？｡。'

df['级别'] = df['级别'].astype(str).apply(process_grade)
df['级别'] = df['级别'].apply(lambda x : x[-2:] if len(x) > 6 else x)
df['级别'] = df['级别'].apply(lambda x : np.nan if x == 'nan' else x)
df = df.fillna(0)

# Binarise the indicator columns: anything other than the 0/'0' sentinel
# becomes 1.  The per-column comparisons are preserved exactly — they
# intentionally differ (int 0 vs string '0').

# 其他案件: non-zero marker -> 1 (left as-is otherwise, no int cast)
df.loc[df['其他案件'] != 0, '其他案件'] = 1
df['其他案件'].value_counts()

# 量刑情节: any value other than the string '0' -> 1, then cast to int
df.loc[df['量刑情节'] != '0', '量刑情节'] = 1
df['量刑情节'] = df['量刑情节'].astype(int)
df['量刑情节'].value_counts()

# 级别 / 徒刑 share the same two-step recode: string '0' -> 0, rest -> 1
for _col in ('级别', '徒刑'):
    df.loc[df[_col] == '0', _col] = 0
    df.loc[df[_col] != 0, _col] = 1
    df[_col] = df[_col].astype(int)
    df[_col].value_counts()

"""属性数值化"""
for name in ['性别', '学历']:
    le = LabelEncoder()
    df[name] = le.fit_transform(df[name].astype(str))

# Difference analysis: two-sample t-test of prison-term outcome between
# the two gender groups.
import statsmodels.api as sm
t_stat, p_value, dof = sm.stats.ttest_ind(
    df.loc[df['性别'] == 1]['徒刑'],
    df.loc[df['性别'] == 0]['徒刑'],
)
print('t统计量为: %.3f;p值为: %.4f;自由度为：%.1f;' % (t_stat, p_value, dof))

# LabelEncoder codes: male (男) = 1, female (女) = 0.
# Keep only the FEMALE subsample (性别 == 0).  The original comment said
# "analyze males", contradicting both this filter and the female-labelled
# outputs ('女性_...') produced below.
df = df.loc[df['性别'] == 0]
df = df.drop('性别', axis=1)

data = df.copy()
feature_names = ['年龄','学历', '数额', '其他案件', '量刑情节','缓刑', '案件','时间']

# NOTE(review): feature_cols follows data.columns order and silently drops
# any feature absent from the frame.  Downstream code labels permutation
# importances with feature_names and assumes the two orders match — verify.
feature_cols = [c for c in data.columns if c in feature_names]

X_train, X_test, y_train, y_test = train_test_split(
    data[feature_cols], data['徒刑'], test_size=0.2, random_state=1)

# Work with plain numpy arrays from here on.
X_train, X_test, y_train, y_test = X_train.values, X_test.values, y_train.values, y_test.values

"""
Synthetic Minority Over-sampling Technique(SMOTE) 
"""
print(Counter(y_train))
sm = SMOTE(random_state=0)
X_train, y_train = sm.fit_resample(X_train, y_train.ravel())
print(Counter(y_train))

# 归一化
scaler = StandardScaler() #MinMaxScaler
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

n_classes = 2
Y_train = to_categorical(y_train, n_classes)
Y_test = to_categorical(y_test, n_classes)

def DNN(input_dim=8, n_classes=2):
    """Build and compile a small feed-forward classifier.

    Parameters
    ----------
    input_dim : int, optional
        Number of input features.  Default 8 matches the feature set used
        in this script (previously hard-coded).
    n_classes : int, optional
        Number of output classes.  Default 2 matches the module-level
        ``n_classes`` the original read from global scope.

    Returns
    -------
    keras.models.Sequential
        Compiled model (Adam, lr=0.001, categorical cross-entropy).
    """
    model = Sequential()
    model.add(Dense(units=512, activation='relu', input_dim=input_dim))
    model.add(Dropout(rate=0.6))
    model.add(Dense(units=256, activation='relu'))
    model.add(Dropout(rate=0.6))
    # Softmax head, one unit per class.
    model.add(Dense(units=n_classes, activation='softmax'))
    model.compile(optimizer=Adam(learning_rate=0.001),
                  loss='categorical_crossentropy', metrics=['acc'])
    model.summary()
    return model

# Train the DNN; the untouched test split doubles as validation data.
model = DNN()
history = model.fit(X_train, Y_train, batch_size=32, epochs=100, validation_data= (X_test, Y_test), verbose=2)

# Feature importance via permutation on a freshly trained Keras model.
# NOTE(review): keras.wrappers.scikit_learn was removed from modern
# TF/Keras — this relies on an older environment; confirm versions.
from eli5.sklearn import PermutationImportance
from keras.wrappers.scikit_learn import KerasClassifier
my_model = KerasClassifier(build_fn=lambda: DNN(), epochs=100, batch_size=8, verbose=0)
my_model.fit(X_train, Y_train)
perm = PermutationImportance(my_model, random_state=1).fit(X_train, Y_train)
# import eli5 eli5.show_weights(perm, feature_names = feature_names.tolist())
# Feature-importance visualisation.
plt.figure(figsize=(10, 8))
# NOTE(review): indexing by feature_names assumes perm.feature_importances_
# follows that order; the training matrix was built in data.columns order,
# so verify the two agree.
feature_imp = pd.Series(perm.feature_importances_, index=feature_names).sort_values(ascending=False)
sns.barplot(x=feature_imp, y=feature_imp.index)
plt.xlabel('Feature Importance Score')
plt.ylabel('Features')
plt.title("Visualizing Important Features of females")
# Save BEFORE show(): show() clears the current figure, so the original
# savefig-after-show wrote out a blank image.  The handle-less legend()
# call (which only produced a warning) is dropped.
plt.savefig('./result/女性_徒刑_特征重要性图.png', dpi=200, bbox_inches='tight')
plt.show()
feature_imp.to_csv('./result/女性_徒刑_特征重要性表.csv')


"""Plot training vs validation accuracy and loss per epoch, saving each figure."""
hist = history.history
for key, ylab, out_path in (
    ('acc', 'accuracy', './result/女性_徒刑_acc'),
    ('loss', 'loss', './result/女性_徒刑_loss'),
):
    plt.plot(hist[key], 'r', label='tr_%s' % key)
    plt.plot(hist['val_%s' % key], 'b', label='val_%s' % key)
    plt.xlabel('epoch')
    plt.ylabel(ylab)
    plt.legend()
    plt.savefig(out_path)
    plt.show()

# model.save('./result/DNN_女性.h5')
# model = load_model('./result/DNN_女性.h5')

# Softmax probabilities, shape (num, 2).  Sequential.predict_proba was
# removed in TF >= 2.6; predict returns the same softmax output here.
y_pred_ = model.predict(X_test)

# Score of the POSITIVE class for ROC/AUC.  The original used
# y_pred_[i][y_test[i]] — the probability of the *true* label — which
# leaks the labels into the score and makes roc_auc_score meaningless.
y_pred = y_pred_[:, 1]
print(y_pred)

print(min(y_pred))
# Hard class predictions via argmax over the class axis.
y_result = np.argmax(model.predict(X_test), axis=1)
acc = accuracy_score(y_test, y_result) * 100
print("\nTesting Accuracy: {:.3f} %".format(acc))
print('DNN模型auc', roc_auc_score(y_test, y_pred))
pd.DataFrame({'Testing Accuracy':[acc],'Testing AUC':[roc_auc_score(y_test, y_pred)]}).to_csv('result/女性_徒刑_result.csv')


# Confusion matrix (computed once; the original built it twice into an
# unused C and cm, and kept the unused display handle).
cm = confusion_matrix(y_test, y_result)
ConfusionMatrixDisplay(cm).plot()
plt.savefig('./result/女性_徒刑_混淆矩阵')

# ROC curve of the DNN on the test split.
plt.figure(figsize=(12, 8), dpi=300)
fpr, tpr, thresholds = roc_curve(y_test, y_pred, pos_label=1)
plt.plot(fpr, tpr, lw=5, label='{} (AUC={:.3f})'.format('DNN Classifier', auc(fpr, tpr)), color='crimson')
plt.plot([0, 1], [0, 1], '--', lw=5, color='grey')  # chance diagonal
plt.axis('square')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.title('ROC Curve', fontsize=25)
plt.legend(loc='lower right', fontsize=20)
plt.savefig('./result/女性_徒刑_ROC')

#
# rf = RandomForestClassifier()
# rf.fit(X_train, y_train)
# lr = LogisticRegression()
# lr.fit(X_train, y_train)
# lgb = lgb.LGBMClassifier()
# lgb.fit(X_train, y_train)
#
# def multi_models_roc(names, sampling_methods, colors, X_test, y_test, save=True, dpin=300):
#
#     for (name, method, colorname) in zip(names, sampling_methods, colors):
#         y_test_preds = method.predict(X_test)
#         y_test_predprob = method.predict_proba(X_test)[:, 1]
#         fpr, tpr, thresholds = roc_curve(y_test, y_test_predprob, pos_label=1)
#
#         plt.plot(fpr, tpr, lw=5, label='{} (AUC={:.3f})'.format(name, auc(fpr, tpr)), color=colorname)
#         plt.plot([0, 1], [0, 1], '--', lw=5, color='grey')
#         plt.axis('square')
#         plt.xlim([0, 1])
#         plt.ylim([0, 1])
#         plt.xlabel('False Positive Rate', fontsize=20)
#         plt.ylabel('True Positive Rate', fontsize=20)
#         plt.title('ROC Curve', fontsize=25)
#         plt.legend(loc='lower right', fontsize=20)
#
#     if save:
#         plt.savefig('multi_models_roc.png')
#
#     return plt
#
#
# names = ['RandomForestClassifier',
#          'LogisticRegression',
#          'LightGBM']
#
# sampling_methods = [rf,
#                     lr,
#                     lgb
#                     ]
#
# colors = ['blue',
#           'orange',
#           'green'
#           ]
#
# # ROC curves
# test_roc_graph = multi_models_roc(names, sampling_methods, colors, X_test, y_test, save=False)  # 这里可以改成训练集
#
# plt.show()