# Scorecard-modeling utilities: path helpers, bin stability/monotonicity
# checks, and plotting helpers built on pandas / seaborn / toad.
import sys, os, re, math,inspect
# Resolve this file's directory and push the repo root (two levels up)
# onto sys.path so the `libs.*` import below resolves.
up_path = os. path.join(os.path.dirname(os.path.abspath(inspect.getframeinfo(inspect.currentframe()).filename)), "../..")
sys.path.append(os.path.abspath(up_path)) #
import pandas as pd
import numpy as np
import toad

import seaborn as sns 
import matplotlib.pyplot as plt 
import warnings

# Silence library warnings (e.g. pandas chained-assignment) for the whole run.
warnings.filterwarnings('ignore')

# from toad.plot import bin_plot, badrate_plot 
from datetime import datetime 
from dateutil import relativedelta 
from IPython.display import HTML
from sklearn.model_selection import train_test_split

from libs.utils.model_trains import lr_model, xgb_model,lr_model_validation 
from sklearn.model_selection import GridSearchCV 
from toad.plot import bin_plot, badrate_plot


def get_path(f):
	"""Join file name *f* onto the GiveMeSomeCredit data directory."""
	data_dir = r'/tmp/ossfs/评分卡/data/GiveMeSomeCredit'
	return os.path.join(data_dir, f)

def change_col(data):
	"""Strip the leading "table." prefix from every column name.

	Hive-style query exports name columns "table.column"; this keeps only
	the part after the first dot. Columns without a dot are left unchanged
	(the original ``split('.')[1]`` raised IndexError on them).

	Parameters
	----------
	data : pd.DataFrame whose columns may carry a table-name prefix.

	Returns
	-------
	pd.DataFrame : the same DataFrame, with renamed columns.
	"""
	# split('.', 1)[-1]: drop everything before the first dot, or keep the
	# whole name when there is no dot at all.
	data.columns = [c.split('.', 1)[-1] for c in data.columns]
	return data


def is_stable(data, col,t='type',target='target'):
	# Check whether the bad-rate *ordering* of the bins of `col` is identical
	# across every time window in `t`, i.e. the binned feature ranks risk
	# consistently over time.
	#   data   : DataFrame containing at least [t, target, col]
	#   col    : binned feature column to test
	#   t      : column marking the time window / dataset split
	#   target : binary label column (1 = bad)
	# Returns True when stable, False otherwise.
	tmp = data[[t,target, col]] # keep only the columns we need
	# Per (window, bin): 'sum' = bad count, 'count' = total count of `target`.
	tmp2 = tmp.groupby([t, col]).agg(['sum','count' ]).reset_index() #	MultiIndex columns
	tmp2['bad_rate' ] = tmp2[target]['sum' ]/tmp2[target]['count' ] # bad rate per (window, bin)
	tmp2 = tmp2.sort_values(by=[t, 'bad_rate' ]) #	order bins by bad rate within each window

	# pdb.set_trace()

	# Each window must contain every bin: if the per-window row counts differ,
	# some window has an empty bin and stability cannot be assessed.
	if len(set(tmp2[t].value_counts()))>1: # 
		print('{} 出现空箱'.format(col))
		return False

	# One row of bin labels per window, in bad-rate order.
	# NOTE(review): assumes `t` takes exactly 3 distinct values — confirm
	# against the callers before reusing with a different number of windows.
	arr = tmp2[col].values.reshape((3, -1)) # 
	# Concatenate each window's bin ordering into one string per window.
	join_result = pd.DataFrame(arr).applymap(lambda x:str(x)).apply(lambda x: ''.join(x), axis=1) # 
	return len(set(join_result))==1 #stable iff all 3 windows share the same bad-rate ordering of bins
def is_monotonic(data, col, t='target'):
	'''
	Check whether the bad-sample rate of binned feature `col` is monotonic
	(non-increasing or non-decreasing) across its bins on the training set.

	Parameters
	----------
	data : pd.DataFrame containing columns [col, t]
	col  : binned feature column
	t    : label column; any non-zero value is counted as bad

	Returns
	-------
	bool
	'''
	assert col != t
	# Work on an explicit copy so the chained assignment below never mutates
	# (or warns about) the caller's DataFrame — the original sliced without
	# .copy(), a SettingWithCopy hazard hidden by the global warning filter.
	tmp = data[[col, t]].copy()
	tmp[t] = np.where(tmp[t] == 0, 0, 1)  # binarize the label
	# Per bin: 'count' = total samples, 'sum' = bad samples.
	tmp_desc = tmp.groupby(col).agg(['count', 'sum'])
	# Bad rate per bin, rounded so tiny fluctuations don't break monotonicity.
	serise_d = round(tmp_desc[t]['sum'] / tmp_desc[t]['count'], 2)
	return serise_d.is_monotonic_decreasing or serise_d.is_monotonic_increasing


def plot_distplot(data, row_n=3):
	'''
	Draw a distribution plot for every column of `data`, arranged on a grid
	with `row_n` plots per row.

	Fix: when `data` has at most `row_n` columns, `plt.subplots` squeezes
	`axs` to a 1-D array and the original `axs[i // row_n, i % row_n]`
	raised IndexError; `squeeze=False` plus flat indexing handles any grid.
	'''
	cols_len = data.shape[1]
	row = math.ceil(cols_len / row_n)

	# squeeze=False guarantees a 2-D axes array even when row == 1.
	_, axs = plt.subplots(row, row_n, figsize=(row_n * 5, row * 4), squeeze=False)
	for i, c in enumerate(data):
		ax = axs.flat[i]
		sns.distplot(data[c], ax=ax, label=c)
		
def get_score_with_model(bad_rate, A, B):
	'''
	Convert a predicted bad rate into a score via A + B * ln(good/bad),
	clamping both probabilities into [0.0001, 0.9999] so the log stays finite.
	'''
	p_good = min(max(1 - bad_rate, 0.0001), 0.9999)
	p_bad = min(max(bad_rate, 0.0001), 0.9999)
	odds = p_good / p_bad
	return round(A + B * np.log(odds), 2)



def plot_box(data, row_n=3):
	'''
	Draw a box plot for every column of `data`, arranged on a grid with
	`row_n` plots per row.

	Fix: when `data` has at most `row_n` columns, `plt.subplots` squeezes
	`axs` to a 1-D array and the original `axs[i // row_n, i % row_n]`
	raised IndexError; `squeeze=False` plus flat indexing handles any grid.
	'''
	cols_len = data.shape[1]
	row = math.ceil(cols_len / row_n)
	# squeeze=False guarantees a 2-D axes array even when row == 1.
	_, axs = plt.subplots(row, row_n, figsize=(row_n * 5, row * 4), squeeze=False)
	for i, c in enumerate(data):
		ax = axs.flat[i]
		sns.boxplot(data[c], ax=ax)

def delete_samples(del_sample_path, data_all_7):
	"""Remove from `data_all_7` every customer listed in the exclusion
	Excel file `del_sample_path` (matched on `cust_no`).

	Returns the filtered copy of `data_all_7`.
	"""
	must_sample_7 = pd.read_excel(get_path(del_sample_path))
	# Query exports prefix columns with the table name; strip it
	# (same transform as change_col).
	must_sample_7.columns = list(map(lambda x: x.split('.')[1], must_sample_7.columns))
	# Mark every excluded customer. BUG FIX: the original created the marker
	# column as ' flag' (leading space) but later filtered and dropped
	# 'flag', which raised KeyError after the merge — use one consistent name.
	must_sample_7['flag'] = 1

	# type_desc is irrelevant for matching; drop it before de-duplicating.
	must_sample_7 = must_sample_7.drop('type_desc', axis=1).drop_duplicates()
	data_all_7 = pd.merge(data_all_7, must_sample_7, on="cust_no", how='left')
	# Keep only rows that did NOT match the exclusion list.
	data_all_7 = data_all_7.loc[data_all_7['flag'].isnull(), :].drop('flag', axis=1)
	return data_all_7

def bin_bg_plot(data, col, ax=None):
	'''
	Draw a stacked good/bad bar chart per bin of `col`, overlay the per-bin
	bad-rate line, and annotate each bin with its sample share and bad rate.

	data : DataFrame with a binary 'target' column (1 = bad)
	col  : binned feature column to plot
	ax   : optional matplotlib Axes; a new 12x6 figure is created when None
	'''
	target_cnt = data.loc[data['target'] == 1, col].value_counts()   # bad count per bin
	target_cnt0 = data.loc[data['target'] == 0, col].value_counts()  # good count per bin
	bad_rate = target_cnt / data[col].value_counts()                 # bad rate per bin
	all_rate = data[col].value_counts() / data.shape[0]              # sample share per bin
	tmp_data = pd.concat([target_cnt0, target_cnt], axis=1)          # aligned on bin label
	tmp_data.columns = ['good_cnt', 'target_cnt']
	tmp_data = tmp_data / data.shape[0]  # stack heights as population shares

	if ax is None:
		_, ax = plt.subplots(figsize=(12, 6))
	ax.set_xlim(xmin=0, xmax=10)
	tmp_data.sort_index().plot.bar(stacked=True, ax=ax, color=["#83af9b", "#fe4365"])
	# Bad-rate line per bin. BUG FIX: pass ax=ax — the original omitted it,
	# so the line was drawn on the current axes instead of the axes holding
	# the bars whenever a caller supplied `ax`.
	sns.lineplot(x=bad_rate.index, y=bad_rate, color='#458994', ax=ax)
	for x, y in zip(all_rate.index, all_rate):  # annotate sample share
		ax.text(x=x, y=y + 0.01, s=round(y, 2), fontsize=15, color="#fe4365")
	for x, y in zip(bad_rate.index, bad_rate):  # annotate bad rate
		ax.text(x=x, y=y, s=round(y, 2), fontsize=15, color="#458994")

def cal_lift(y_pred,y_true,A,B,ks_score=None): 
	'''
	Plot the score distributions of good vs bad customers, bucket the test
	set into deciles with toad.KS_bucket, and return the bucket table with
	a lift column (cumulative bad share / cumulative total share).

	y_pred   : array-like of predicted bad probabilities
	y_true   : pd.Series of true binary labels (1 = bad)
	A, B     : scorecard offset / factor passed to get_score_with_model
	ks_score : optional score at which to draw a vertical KS marker line
	'''
	# Convert each probability into a score; keep probability and label alongside.
	score_pd = pd.DataFrame({'score':pd.Series(y_pred).apply(get_score_with_model,args=(A,B)).values,'possi':y_pred ,'real':y_true.values}) 
	read_bad = score_pd.loc[score_pd['real' ]==1,: ] 
	read_good = score_pd.loc[score_pd['real']==0,:] 
	fig,ax = plt.subplots(1,figsize=(16,8))
	# Score density of bad vs good customers on a shared axis.
	sns.kdeplot(read_bad['score' ], label='bad' ,ax=ax, shade=True) 
	sns.kdeplot(read_good['score' ], label='good' ,ax=ax,shade=True) 
	if ks_score!=None:
		# Mark the score at which KS is reached.
		ax.axvline(ks_score,ymin=0,ymax=1, color='r',linestyle='--') 
	ax.legend()
	rs= toad.KS_bucket(score_pd['score'].values, y_true, method='quantile', bucket=10) # split the test data into 10 quantile buckets
	# Rename toad's bucket columns to the Chinese report labels.
	rs_filter_map = { 'min' :'最低分数'	,'max':'最高分数','bads':'坏客户数','goods':'好客户数'
	,'total':'总客户数', 'bad_rate' :'精确率', 'bad_prop': '召回率'	, 'total_prop' :'分组客户占比',
	'cum_bads_prop' : '累计坏客户占比','cum_total_prop':'累计总客户占比'}

	ks_bucket_pd = rs[rs_filter_map.keys()].sort_index().rename(columns=rs_filter_map) 
	ks_bucket_pd['lift' ]=ks_bucket_pd["累计坏客户占比"]/ks_bucket_pd['累计总客户占比' ] # lift = cumulative bad share / cumulative total share
	# ks_bucket_pd.applymap(lambda x:round(x,2))

	fig,ax = plt.subplots(1,figsize=(16,8))
	# Bar chart of recall vs per-group customer share for each bucket.
	ks_bucket_pd[['召回率','分组客户占比']].reset_index().drop('index' ,axis=1).plot.bar(ax=ax) # 
	return ks_bucket_pd	

