
import pandas as pd
import numpy as np

import math
from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

import pandas as pd
import numpy as np

import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns

from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

from sklearn.linear_model import LogisticRegression


from sklearn.metrics import classification_report,roc_auc_score,accuracy_score 
from xgboost.sklearn import XGBClassifier


# More standard KS implementation (via sklearn's ROC curve):
from sklearn import metrics
def ks_score_V2(y_true,y_pred):
    """Kolmogorov-Smirnov statistic for a binary classifier's scores.

    KS = max |TPR - FPR| over all thresholds of the ROC curve.

    :param y_true: array-like of binary ground-truth labels
    :param y_pred: array-like of predicted scores/probabilities
    :return: KS statistic as a float in [0, 1]
    """
    # BUGFIX (naming): sklearn's roc_curve returns (fpr, tpr, thresholds);
    # the previous code unpacked them as (tpr, fpr, ...). The KS value was
    # still correct because |tpr - fpr| is symmetric, but the names were
    # misleading to any future change that uses tpr/fpr individually.
    fpr, tpr, thresholds = metrics.roc_curve(np.array(y_true), y_pred)
    ks = np.abs(tpr - fpr).max()
    return ks


# Automatic "optimal" binning: search for quantile bin edges such that the
# per-bucket means of X and Y are (near-)monotonically related.
def mono_bin(Y, X, n ):
    """Monotonic quantile binning of feature X against binary target Y.

    Starting from ``n`` quantile buckets, the bucket count is reduced one at
    a time until the Spearman correlation between the per-bucket means of X
    and Y satisfies |r| >= 0.95, i.e. the bucket means are essentially
    monotonic.

    Parameters
    ----------
    Y : pd.Series -- binary target (1 = target event)
    X : pd.Series -- continuous feature to bin
    n : int       -- initial number of quantile buckets

    Returns
    -------
    d4  : DataFrame of per-bucket stats (min/max/sum/total/rate/woe/IV)
    iv  : float, total information value of the binned feature
    cut : list of bucket edges, padded with -inf / +inf
    woe : list of per-bucket WOE values rounded to 3 decimals

    NOTE(review): the final ``cut`` edges are recomputed from quantiles of X
    and may not exactly match the buckets in ``d4`` when pd.qcut dropped
    duplicate edges -- confirm downstream usage.
    """
    r = 0  # Spearman correlation; initialised so the loop runs at least once
    good=Y.sum()   # number of target samples (Y == 1)
    bad=Y.count()-good  # number of non-target samples (Y == 0)

    rowN = n  # NOTE(review): assigned but never used afterwards

  # Core of the binning: let the data choose the bucket count instead of
  # fixing it by hand -- shrink n until the bucket means are monotonic.
    while np.abs(r) < 0.95:
        # pd.qcut: equal-frequency split of X into n buckets
        d1 = pd.DataFrame({"X": X, "Y": Y, "Bucket": pd.qcut(X, n, duplicates="drop")})
        d2 = d1.groupby('Bucket', as_index = True,  observed=True)
        # Spearman correlation of bucket means is the stopping criterion
        r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)
        n = n - 1
    if str(r) == 'nan':  # correlation undefined (e.g. a single bucket left)
      n = 1

    d3 = pd.DataFrame(d2.X.min(), columns = ['min'])
    d3['min']=d2.min().X    # left edge of each bucket
    d3['max'] = d2.max().X  # right edge of each bucket
    d3['sum'] = d2.sum().Y  # target count per bucket
    d3['total'] = d2.count().Y  # sample count per bucket
    d3['rate'] = d2.mean().Y    # target rate per bucket
    d3['woe']=np.log((d3['rate']/(1-d3['rate']))/(good/bad))  # weight of evidence
    d3['goodattribute']=d3['sum']/good          # bucket share of all targets
    d3['badattribute']=(d3['total']-d3['sum'])/bad  # bucket share of non-targets

    rawIvs = (d3['goodattribute']-d3['badattribute'])*d3['woe']
    d3['bktIV'] = rawIvs  # per-bucket IV contribution

    iv=((d3['goodattribute']-d3['badattribute'])*d3['woe']).sum()   # total IV

    d4 = (d3.sort_values(by = 'min')).reset_index(drop=True)    # buckets ordered by left edge
    woe=list(d4['woe'].round(3))             # WOE list, 3 decimals
    cut=[]    # bucket edge list
    cut.append(float('-inf'))  # prepend -inf so everything falls in a bucket
    for i in range(1,n+1):            # n interior splits give n+1 buckets
         qua=X.quantile(i/(n+1))     # quantile value used as a bucket edge
         cut.append(round(qua,4))   # keep 4 decimals
    cut.append(float('inf')) # append +inf
    return d4,iv,cut,woe


def bin_frequency(x,y,n=10): # x: feature to bin, y: binary target, n: number of bins
    """Equal-frequency binning of x against binary target y.

    Returns
    -------
    IV   : float, total information value of the binned feature
    cut  : list of bucket left edges padded with -inf / +inf
    WOEi : list of per-bucket WOE values rounded to 3 decimals
    d4   : DataFrame of per-bucket stats, sorted by left edge

    NOTE(review): unlike mono_bin above, this groupby does not pass
    observed=True; on newer pandas versions grouping on a categorical may
    warn or behave differently -- confirm before upgrading pandas.
    """
    total = y.count()       # total sample count
    bad = y.sum()           # "bad" (y == 1) sample count
    good = total-bad        # "good" (y == 0) sample count
    if x.value_counts().shape[0]==2:    # binary feature: just split into two buckets
        d1 = pd.DataFrame({'x':x,'y':y,'bucket':pd.cut(x,2)})
    else:
        d1 = pd.DataFrame({'x':x,'y':y,'bucket':pd.qcut(x,n,duplicates='drop')})  # equal-frequency split
    d2 = d1.groupby('bucket',as_index=True)  # aggregate per bucket
    d3 = pd.DataFrame(d2.x.min(),columns=['min_bin'])
    d3['min_bin'] = d2.x.min()               # left edge of each bucket
    d3['max_bin'] = d2.x.max()               # right edge of each bucket
    d3['bad'] = d2.y.sum()                   # bad count per bucket
    d3['total'] = d2.y.count()               # total count per bucket
    d3['bad_rate'] = d3['bad']/d3['total']   # bad rate per bucket
    d3['badattr'] = d3['bad']/bad            # bucket share of all bads
    d3['goodattr'] = (d3['total'] - d3['bad'])/good    # bucket share of all goods
    d3['WOEi'] = np.log(d3['badattr']/d3['goodattr'])  # weight of evidence per bucket
    IV = ((d3['badattr']-d3['goodattr'])*d3['WOEi']).sum()  # total IV
    d3['IVi'] = (d3['badattr']-d3['goodattr'])*d3['WOEi']   # per-bucket IV contribution
    d4 = (d3.sort_values(by='min_bin')).reset_index(drop=True) # order buckets by left edge
    cut = []
    cut.append(float('-inf'))
    for i in d4.min_bin:
        cut.append(i)
    cut.append(float('inf'))
    WOEi = list(d4['WOEi'].round(3))
    return IV,cut,WOEi,d4


def cal_psi(actual, predict, bins=10):
    """
    功能: 计算PSI值，并输出实际和预期占比分布曲线
    :param actual: Array或series，代表真实数据，如训练集模型得分
    :param predict: Array或series，代表预期数据，如测试集模型得分
    :param bins: 分段数
    :return:
        psi: float，PSI值
        psi_df:DataFrame

    Examples
    -----------------------------------------------------------------
    >>> import random
    >>> act = np.array([random.random() for _ in range(5000000)])
    >>> pct = np.array([random.random() for _ in range(500000)])
    >>> psi, psi_df = cal_psi(act,pct)
    >>> psi
    1.65652278590053e-05
    >>> psi_df
       actual  predict  actual_rate  predict_rate           psi
    0  498285    49612     0.099657      0.099226  1.869778e-06
    1  500639    50213     0.100128      0.100428  8.975056e-07
    2  504335    50679     0.100867      0.101360  2.401777e-06
    3  493872    49376     0.098775      0.098754  4.296694e-09
    4  500719    49710     0.100144      0.099422  5.224199e-06
    5  504588    50691     0.100918      0.101384  2.148699e-06
    6  499988    50044     0.099998      0.100090  8.497110e-08
    7  496196    49548     0.099239      0.099098  2.016157e-07
    8  498963    50107     0.099793      0.100216  1.790906e-06
    9  502415    50020     0.100483      0.100042  1.941479e-06

    """
    actual_min = actual.min()  # 实际中的最小概率
    actual_max = actual.max()  # 实际中的最大概率
    binlen = (actual_max - actual_min) / bins
    cuts = [actual_min + i * binlen for i in range(1, bins)]#设定分组
    cuts.insert(0, -float("inf"))
    cuts.append(float("inf"))
    actual_cuts = np.histogram(actual, bins=cuts)#将actual“等宽”分箱
    predict_cuts = np.histogram(predict, bins=cuts)#将predict按actual的分组“等宽”分箱
    actual_df = pd.DataFrame(actual_cuts[0],columns=['actual'])
    predict_df = pd.DataFrame(predict_cuts[0], columns=['predict'])
    psi_df = pd.merge(actual_df,predict_df,right_index=True,left_index=True)
    psi_df['actual_rate'] = (psi_df['actual'] + 1) / psi_df['actual'].sum()#计算占比，分子加1，防止计算PSI时分子分母为0
    psi_df['predict_rate'] = (psi_df['predict'] + 1) / psi_df['predict'].sum()
    psi_df['psi'] = (psi_df['actual_rate'] - psi_df['predict_rate']) * np.log(
        psi_df['actual_rate'] / psi_df['predict_rate'])
    psi = psi_df['psi'].sum()
    return psi, psi_df


################

def stat_group_labs_rate(vSL): #, numAllY0,numAllY1):
  """Summarise one score bucket.

  Parameters
  ----------
  vSL : list of ((score, idx), label) pairs; label > 0 marks a positive
      (target) sample.

  Returns
  -------
  (minScore, maxScore, avgScore, rateLab1, numLab1, numLabs)

  Raises
  ------
  ValueError / ZeroDivisionError on an empty bucket (empty input was not
  supported by the original either).
  """
  # Single pass: collect scores and count positive labels.
  vScores = [kv[0][0] for kv in vSL]
  numLab1 = sum(1 for kv in vSL if kv[1] > 0)
  numLabs = len(vSL)

  # BUGFIX: the previous sentinel initialisation (minScore=100000,
  # maxScore=-100) returned wrong extremes for any score outside the
  # (-100, 100000) range; use the true min/max instead.
  minScore = min(vScores)
  maxScore = max(vScores)
  avgScore = sum(vScores) / len(vScores)
  rateLab1 = numLab1 / numLabs

  return minScore, maxScore, avgScore, rateLab1, numLab1, numLabs

def calc_cur_group_iv(numAllY0, numAllY1, numCurY0, numCurY1):
  """IV contribution of a single bucket.

  numAllY0 / numAllY1 : overall counts of label-0 / label-1 samples.
  numCurY0 / numCurY1 : counts of label-0 / label-1 samples in this bucket.

  Returns (badattr - goodattr) * ln(badattr / goodattr), where badattr and
  goodattr are the bucket's shares of all positives and negatives.
  """
  grand_total = numAllY0 + numAllY1   # kept for parity with original (unused)
  bucket_total = numCurY0 + numCurY1
  bucket_bad_rate = numCurY1 / bucket_total  # unused; retains original's
                                             # ZeroDivisionError on empty bucket
  share_bad = numCurY1 / numAllY1    # bucket share of all positives
  share_good = numCurY0 / numAllY0   # bucket share of all negatives
  woe = np.log(share_bad / share_good)
  return (share_bad - share_good) * woe


def calc_score_labs_bins_by_us(scores, labs, numGroups=10):  # equal-frequency bins (same count per bin)
  """Split (score, label) pairs into equal-frequency bins, highest scores first.

  Parameters
  ----------
  scores : positionally-indexable model scores
  labs   : iterable of binary labels aligned with ``scores``
  numGroups : number of bins; the first numGroups-1 bins hold
      len(scores)//numGroups items each, the last takes the remainder.

  Returns
  -------
  list of per-bin tuples:
      (min score, max score, avg score, size, positive rate,
       lift, cumulative lift, IV contribution)
  """
  numItems = len(scores)
  numInGrp = int(numItems / numGroups)  # base bin size

  # Key each sample by (score, position) so duplicate scores survive.
  dScoreLab = {}
  numAllLab1 = 0
  numAllLabs = numItems
  sIdx = 0
  for l in labs:
    dScoreLab[(scores[sIdx], sIdx)] = l
    if l == 1:
      numAllLab1 += 1
    sIdx += 1

  numAllLab0 = numAllLabs - numAllLab1
  rateAllLab1 = numAllLab1 / numAllLabs  # overall positive rate (lift baseline)

  # Sort descending by score (ties broken by label): best scores first.
  sSls = sorted(dScoreLab.items(), key=lambda x: (x[0][0], x[1]), reverse=True)

  vRes = []
  sumLabs = 0
  sumLab1 = 0
  begIdx = 0
  for gIdx in range(numGroups):
    # The last bin absorbs the remainder; previously this case was a
    # copy-pasted duplicate of the whole loop body.
    endIdx = begIdx + numInGrp if gIdx < numGroups - 1 else numItems
    vCurRng = sSls[begIdx:endIdx]
    minScore, maxScore, avgScore, rateLab1, numLab1, numLabs = stat_group_labs_rate(vCurRng)
    sumLabs += numLabs
    sumLab1 += numLab1
    curLift = rateLab1 / rateAllLab1       # bin lift vs overall rate
    cumRateLab1 = sumLab1 / sumLabs
    cumLift = cumRateLab1 / rateAllLab1    # cumulative lift down to this bin
    curIv = calc_cur_group_iv(numAllLab0, numAllLab1, numLabs - numLab1, numLab1)

    vRes.append((round(minScore, 4), round(maxScore, 4), round(avgScore, 4), len(vCurRng),
                 round(rateLab1, 4), round(curLift, 4), round(cumLift, 4), round(curIv, 4)))
    begIdx = endIdx

  return vRes
  
  

def calc_score_labs_bins_by_us_v2(scores, labs, numGroups=10): # equal-width bins (same score range per bin)
  """Split (score, label) pairs into equal-width score bins.

  The score axis between the observed min and max is cut into ``numGroups``
  equal-width intervals; each bin receives however many samples fall in its
  interval.  Bins are reported from the highest score range downwards.

  Returns the same per-bin tuples as calc_score_labs_bins_by_us:
      (min, max, avg score, size, positive rate, lift, cumulative lift, IV).

  NOTE(review): prints diagnostics to stdout on every call; an empty
  interval would make stat_group_labs_rate divide by zero -- confirm inputs
  are dense enough.
  """
  vRes = []

  numItems = len(scores)

  # Key each sample by (score, position) so duplicate scores survive.
  dScoreLab = {}
  numAllLab1 = 0
  numAllLabs = numItems
  sIdx = 0
  for l in labs:
    s = scores[sIdx]
    dScoreLab[(s, sIdx)] = l
    if l == 1:
      numAllLab1 += 1
    sIdx += 1

  numAllLab0 = numAllLabs - numAllLab1

  rateAllLab1 = numAllLab1 / numAllLabs  # overall positive rate (lift baseline)

  # Descending by score; e.g. [((0.142, 26025), 0), ..., ((0.072, 16943), 0)]
  sSls = sorted(dScoreLab.items(), key=lambda x: (x[0][0],x[1]),reverse=True)
  vsScores = []
  for kv in sSls:
    vsScores.append(kv[0][0])
  
  maxScore = sSls[0][0][0]
  minScore = sSls[-1][0][0]
  print("maxScore:" + str(maxScore))
  print("minScore:" + str(minScore))
  lenBin = (maxScore-minScore) / numGroups  # width of each score interval
  print("lenBin:" + str(lenBin))
  cuts = [minScore + i * lenBin for i in range(1, numGroups)]  # interior edges
  cuts.insert(0, -float("inf"))
  cuts.append(float("inf"))
  print("cuts:" + str(cuts))
  # Histogram of scores over the equal-width edges (ascending bin order).
  actual_cuts = np.histogram(vsScores, bins=cuts)
  print("actual_cuts:" + str(actual_cuts))
  print(type(actual_cuts))
  # Reverse the per-bin counts so they line up with the descending sSls order.
  vGroupsLen = actual_cuts[0].tolist()
  vGroupsLen = vGroupsLen[::-1]  
   
  sIdx = 0
  sumLabs = 0
  sumLab1 = 0
  for gIdx in range(len(vGroupsLen)):
    cGrpNum = vGroupsLen[gIdx]   # number of samples in this score interval
    begIdx = sIdx
    endIdx = sIdx + cGrpNum

    vCurRng = sSls[begIdx : endIdx]
    minScore, maxScore, avgScore, rateLab1, numLab1, numLabs = stat_group_labs_rate(vCurRng)
    sumLabs += numLabs
    sumLab1 += numLab1
    curLift = rateLab1/rateAllLab1          # bin lift vs overall rate
    cumRateLab1 = sumLab1 / sumLabs
    cumLift = cumRateLab1 / rateAllLab1     # cumulative lift down to this bin

    curIv = calc_cur_group_iv(numAllLab0, numAllLab1, numLabs-numLab1, numLab1)

    vRes.append((round(minScore,4), round(maxScore,4), round(avgScore,4), len(vCurRng), round(rateLab1,4), round(curLift,4), round(cumLift,4), round(curIv,4)))

    sIdx += cGrpNum

  return vRes
  


def calc_date_ranges_effects(serDt, vPreds, vLabs, nDaysInGroup, tag, fOut):
  """Print AUC and KS for consecutive windows of nDaysInGroup distinct dates.

  serDt  : iterable of application dates aligned with vPreds / vLabs
  vPreds : list of model scores
  vLabs  : list of binary labels
  nDaysInGroup : number of distinct dates pooled into one window
  tag    : label prefix for the printed lines
  fOut   : NOTE(review): accepted but never used in this function -- confirm
           whether file output was intended here (sibling calc_oot_preds_psis
           does write to fOut).
  """
  print("in calc_date_ranges_effects ...")
  # Bucket (prediction, label) pairs per distinct date.
  dDtFreq = {}
  pIdx = 0
  for dt in serDt:
    cP = vPreds[pIdx]
    cL = vLabs[pIdx]
    if dt not in dDtFreq:
      dDtFreq[dt] = []
    dDtFreq[dt].append((cP,cL))
    pIdx += 1
  # Dates in ascending order.
  sDtFreqs = sorted(dDtFreq.items(), key=lambda x: x[0],reverse=False)
  print(len(sDtFreqs))
  
  # Assign every nDaysInGroup consecutive dates to one window.
  dIdx = 0
  dGrpPreds = {}  # window index -> ([predictions], [labels])
  dGrpDts = {}    # window index -> [dates in window]
  for kv in sDtFreqs:
    cGrpIdx = math.floor(dIdx/nDaysInGroup)
    if cGrpIdx not in dGrpPreds:
      dGrpPreds[cGrpIdx] = ([],[])
      dGrpDts[cGrpIdx] = []
      
    for pp in kv[1]:
      dGrpPreds[cGrpIdx][0].append(pp[0])
      dGrpPreds[cGrpIdx][1].append(pp[1])
    dGrpDts[cGrpIdx].append(kv[0])
    dIdx += 1
    
  # Score each window: AUC and KS over its pooled predictions.
  vGrpDtRng = []
  sGrpFreqs = sorted(dGrpPreds.items(), key=lambda x: x[0],reverse=False)
  for kv in sGrpFreqs:
    cGIdx = kv[0]
    dtRng = (dGrpDts[cGIdx][0], dGrpDts[cGIdx][-1])  # (first, last) date of window
    print(str(cGIdx) + " - " + str(dtRng))
    vCPreds = kv[1][0]
    vCLabs = kv[1][1]
    print(len(vCPreds))
    print(len(vCLabs))
    
    cAUC = roc_auc_score(pd.Series(vCLabs), pd.Series(vCPreds))
    cKS = ks_score_V2(pd.Series(vCLabs), pd.Series(vCPreds))
    print(tag + ", auc:" + str(cAUC) + " - ks:" + str(cKS))
  

def calc_oot_preds_psis(serOotDt, serPreds, nDaysInGroup, tag,fOut):
  """Report PSI between consecutive date windows of prediction scores.

  Groups predictions into windows of nDaysInGroup distinct dates (by date
  index, not calendar span), then prints -- and optionally writes to fOut --
  each window's size and the PSI between it and the following window.

  serOotDt : iterable of dates aligned with serPreds
  serPreds : positionally-indexable prediction scores
  nDaysInGroup : number of distinct dates per window
  tag  : label prefix for each printed/written line
  fOut : open writable file or None
  """
  # Bucket predictions per distinct date.
  preds_by_date = {}
  for idx, dt in enumerate(serOotDt):
    preds_by_date.setdefault(dt, []).append(serPreds[idx])

  # Walk dates in ascending order, assigning every nDaysInGroup of them
  # to one window.
  grp_day_lists = {}  # window index -> list of per-date prediction lists
  grp_dates = {}      # window index -> list of dates
  for dIdx, (dt, day_preds) in enumerate(sorted(preds_by_date.items(), key=lambda x: x[0])):
    g = math.floor(dIdx / nDaysInGroup)
    grp_day_lists.setdefault(g, []).append(day_preds)
    grp_dates.setdefault(g, []).append(dt)

  # Flatten each window and remember its (first, last) date range.
  vGrpPreds = []
  vGrpDtRng = []
  for g, day_lists in sorted(grp_day_lists.items(), key=lambda x: x[0]):
    flat = []
    for day in day_lists:
      flat.extend(day)
    vGrpPreds.append(flat)
    vGrpDtRng.append((grp_dates[g][0], grp_dates[g][-1]))

  # PSI of each window against the one that follows it.
  for gIdx in range(len(vGrpPreds)):
    line = tag + "， grp_" + str(gIdx) + ", " + str(len(vGrpPreds[gIdx]))
    print(line)
    if fOut is not None:
      fOut.write(line + "\n")
    if gIdx < len(vGrpPreds)-1:
      psi, _ = cal_psi(pd.Series(vGrpPreds[gIdx]), pd.Series(vGrpPreds[gIdx+1]))
      msg = tag + ", psi of " + str(vGrpDtRng[gIdx]) + " vs " + str(vGrpDtRng[gIdx+1]) + " : " + str(psi)
      print(msg)
      if fOut is not None:
        fOut.write(msg + "\n")



# ---- Load sample data and select the modelling population ----------------
rawPtnsPath = "../恒昌10万_青禾苗.xlsx"

pdData = pd.read_excel(rawPtnsPath)
# Month key (YYYYMM) extracted from the numeric application date.
pdData['mn'] = pdData['apply_date'].apply(lambda x:str(x)[:6])
print(pdData)
print(pdData.columns.tolist())

# Feature list file: one model feature name per non-empty line.
featPath = "D:/yrProj/20240612_众安15w/青禾苗-幸福消金-20240603_report.xlsx/银融_青禾苗_三版模型/银融_青禾苗_入模特征_子分1.1.txt"

vFeats = []       # features fed to the model
vTrainCols = []   # model features plus bookkeeping columns (added below)
for f in open(featPath):
  f = f.strip()
  if len(f) > 0:
    vFeats.append(f)
    vTrainCols.append(f)


print("vFeats:")
print(vFeats)
print(len(vFeats))


# Keep only rows where the respective risk flag is defined (>= 0).
pdPtn1 = pdData[pdData['fpd15_flag']>=0]
pdPtn2 = pdData[pdData['mob3_15_flag']>=0]

print(pdPtn1[['fpd15_flag','mn']].value_counts())
print(pdPtn2[['mob3_15_flag','mn']].value_counts())

# Alternative target/population (fpd15), kept for reference:
#pdPtn_train = pdPtn1[pdPtn1['apply_date'] <= 20240315]
#pdPtn_oot = pdPtn1[pdPtn1['apply_date'] > 20240315]
#colLab = 'fpd15_flag'

# Current target: mob3_15, with an out-of-time split at 2024-01-15.
pdPtn_train = pdPtn2[pdPtn2['apply_date'] <= 20240115]
pdPtn_oot = pdPtn2[pdPtn2['apply_date'] > 20240115]
colLab = 'mob3_15_flag'

# Output paths for the predicted scores written at the end of the script.
ptnProxPath = "./预估分/w10_" + colLab +"_prox"
testProxPath = "./预估分/k42_" + colLab +"_prox"

print(pdPtn_train[[colLab,'mn']].value_counts())
print(pdPtn_oot[[colLab,'mn']].value_counts())

# Carry the date and label along so the split frames keep them for later
# per-date analyses (they are NOT in vFeats, so not used for training).
vTrainCols.append('apply_date')
vTrainCols.append(colLab)

train_x,test_x,train_y,test_y = train_test_split(pdPtn_train[vTrainCols], pdPtn_train[colLab], test_size=0.3, random_state=0)

print("train_x.columns:")
print(train_x.columns.tolist())
print(len(train_x.columns.tolist()))

# ---- Train the classifier and evaluate train / test / OOT ----------------
dParams = {'learning_rate':0.01, 'n_estimators':60, 'max_depth':3, 'min_child_weight':100}

learning_rate = dParams['learning_rate']
n_estimators = dParams['n_estimators']
max_depth = dParams['max_depth']
min_child_weight = dParams['min_child_weight']
  
print(">>>>>")
print(colLab)
print("learning_rate="+str(learning_rate))
print("n_estimators="+str(n_estimators))
print("max_depth="+str(max_depth))
print("min_child_weight="+str(min_child_weight))

# Classifier configured with the tuned hyper-parameters above.
xgb1 = XGBClassifier(
 learning_rate =learning_rate,
 n_estimators=n_estimators,
 max_depth=max_depth,
 min_child_weight=min_child_weight, # regularisers (gamma/lambda/alpha/subsample) left at defaults
 objective= 'binary:logistic',
 nthread=4,
 seed=27)

print("参数寻优后训练：")
xgb1.fit(train_x[vFeats],train_y) 
prex=xgb1.predict(test_x[vFeats]) 
prox=xgb1.predict_proba(test_x[vFeats])[:,1]
prox_train=xgb1.predict_proba(train_x[vFeats])[:,1]


# Train-set performance (AUC + KS).
train_prox=xgb1.predict_proba(train_x[vFeats])[:,1]
print('train ROC: {}'.format(roc_auc_score(train_y,train_prox)))
ks2_train = ks_score_V2(train_y,prox_train)
print("ks2_train:" + str(ks2_train))


# Held-out test-set performance.
print('test accuracy: {}'.format(accuracy_score(test_y,prex))) 
print('test ROC: {}'.format(roc_auc_score(test_y,prox)))
ks2_test = ks_score_V2(test_y,prox)
print("ks2_test:" + str(ks2_test))


# Out-of-time performance.
prex_oot = xgb1.predict(pdPtn_oot[vFeats]) 
prox_oot = xgb1.predict_proba(pdPtn_oot[vFeats])[:,1]
print('oot accuracy: {}'.format(accuracy_score(pdPtn_oot[colLab], prex_oot))) 
print('oot ROC: {}'.format(roc_auc_score(pdPtn_oot[colLab], prox_oot)))
ks2_oot = ks_score_V2(pdPtn_oot[colLab],prox_oot)
print("ks2_oot:" + str(ks2_oot))


# ---- Post-training analysis: bins, importances, PSI, per-window AUC/KS ---
if True:
  
  # Decile (equal-frequency) report on the OOT predictions.
  vSLStats = calc_score_labs_bins_by_us(prox_oot, pdPtn_oot[colLab], numGroups=10)
  for sl in vSLStats:
    print(sl)


  # Feature importances mapped back to column names.
  # NOTE(review): tc also contains 'apply_date' and the label column appended
  # after the features; indexing tc by fIdx relies on vFeats coming first in
  # vTrainCols -- confirm if the column construction changes.
  fi = xgb1.feature_importances_
  tc = list(train_x.columns)

  dFws = {}
  for fIdx in range(len(fi)):
    cc = tc[fIdx]
    cw = fi[fIdx]
    dFws[cc] = cw

  sFMWs = sorted(dFws.items(), key=lambda x: x[1],reverse=True)

  # Log every feature with non-zero importance, descending.
  nF = 0
  for kv in sFMWs:
    ft = kv[0]
    wgt = kv[1]
    if wgt > 0.0:
      print("FI," + ft + "," + str(wgt))
      nF += 1
  print("nF:" + str(nF))
  
  # Score stability (PSI) between consecutive date windows within each split.
  calc_oot_preds_psis(pdPtn_oot['apply_date'], prox_oot, 4, "oot", None)

  calc_oot_preds_psis(test_x['apply_date'], prox, 10, "test", None)
  calc_oot_preds_psis(train_x['apply_date'], prox_train, 10, "train", None)

  # PSI between the three splits' overall score distributions.
  psiTrainTest,_1 = cal_psi(pd.Series(prox_train), pd.Series(prox))
  psiTrainOot,_2 = cal_psi(pd.Series(prox_train), pd.Series(prox_oot))
  psiTestOot,_3 = cal_psi(pd.Series(prox), pd.Series(prox_oot))
  print("psiTrainTest:" + str(psiTrainTest))
  print("psiTrainOot:" + str(psiTrainOot))
  print("psiTestOot:" + str(psiTestOot))
  
  # AUC/KS per date window within each split.
  calc_date_ranges_effects(train_x['apply_date'], prox_train.tolist(), train_x[colLab].tolist(), 10, "train eff", None)
  calc_date_ranges_effects(test_x['apply_date'], prox.tolist(), test_x[colLab].tolist(), 10, "valid eff", None)
  calc_date_ranges_effects(pdPtn_oot['apply_date'], prox_oot.tolist(), pdPtn_oot[colLab].tolist(), 4, "oot eff", None)



# pdData
# ptnProxPath

# D:/yrProj/20240615_恒昌/恒昌样本/盲测42k_青禾苗特征.xlsx
# testProxPath

# ---- Score the full sample and the blind-test sample ---------------------
# One predicted probability per line in each output file.  Context managers
# replace the manual open/close so the files are closed even if predict or
# a write raises.
prox_ptn = xgb1.predict_proba(pdData[vFeats])[:,1]
with open(ptnProxPath, 'w') as fOutPtn:
  for pp in prox_ptn:
    fOutPtn.write(str(pp) + "\n")

pdTest = pd.read_excel("D:/yrProj/20240615_恒昌/恒昌样本/盲测42k_青禾苗特征.xlsx")
prox_test = xgb1.predict_proba(pdTest[vFeats])[:,1]
with open(testProxPath, 'w') as fOutTest:
  for pp in prox_test:
    fOutTest.write(str(pp) + "\n")

'''
>>>>>
mob3_15_flag   <<<<<<<<<<<
learning_rate=0.01
n_estimators=60
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.578746819258908
ks2_train:0.11440524418168141
test accuracy: 0.9000632955904072
test ROC: 0.5615808361594148
ks2_test:0.10231305933951901
oot accuracy: 0.909473183116471
oot ROC: 0.5361615388244142
ks2_oot:0.05857966373233148
(0.1453, 0.1862, 0.1538, 1260, 0.1183, 1.3063, 1.3063, 0.01)
(0.1433, 0.1453, 0.1442, 1260, 0.1024, 1.1309, 1.2186, 0.002)
(0.1412, 0.1433, 0.1422, 1260, 0.0929, 1.0257, 1.1543, 0.0001)
(0.1402, 0.1412, 0.1408, 1260, 0.0825, 0.9118, 1.0937, 0.001)
(0.1385, 0.1402, 0.1395, 1260, 0.0944, 1.0433, 1.0836, 0.0002)
(0.1378, 0.1385, 0.1381, 1260, 0.096, 1.0608, 1.0798, 0.0004)
(0.1366, 0.1378, 0.1374, 1260, 0.0786, 0.8679, 1.0495, 0.0022)
(0.135, 0.1366, 0.1356, 1260, 0.0913, 1.0082, 1.0444, 0.0)
(0.1327, 0.135, 0.1339, 1260, 0.0833, 0.9205, 1.0306, 0.0008)
(0.1256, 0.1327, 0.131, 1264, 0.0657, 0.7254, 1.0, 0.0105)
FI,S02_n65,0.11595881
FI,S02_n85,0.10114597
FI,S02_n77,0.092870705
FI,S02_n72,0.08566492
FI,S02_n48,0.074440084
FI,S03_n65,0.046047766
FI,S03_n100,0.04313848
FI,S03_n21,0.040034376
FI,S03_n22,0.03940377
FI,S05_n1,0.039298583
FI,S02_n73,0.039083682
FI,S03_n23,0.037797596
FI,S01_n17,0.036241792
FI,S03_n82,0.035148554
FI,S03_n42,0.031758506
FI,S03_n44,0.030700397
FI,S01_n7,0.030323973
FI,S05_n5,0.026149448
FI,S01_n3,0.021302633
FI,S01_n6,0.017158989
FI,S03_n28,0.016330918
nF:21
oot， grp_0, 4112
oot, psi of (20240116, 20240119) vs (20240120, 20240123) : 0.006136180882736965
oot， grp_1, 3120
oot, psi of (20240120, 20240123) vs (20240124, 20240127) : 0.014720552210693176
oot， grp_2, 2618
oot, psi of (20240124, 20240127) vs (20240128, 20240131) : 0.0010795878797341299
oot， grp_3, 2754
test， grp_0, 3000
test, psi of (20231201, 20231210) vs (20231211, 20231220) : 0.011061001954826745
test， grp_1, 3130
test, psi of (20231211, 20231220) vs (20231221, 20231230) : 0.012366562915836973
test， grp_2, 2667
test, psi of (20231221, 20231230) vs (20231231, 20240109) : 0.021919298590032183
test， grp_3, 3635
test, psi of (20231231, 20240109) vs (20240110, 20240115) : 0.01192278918347348
test， grp_4, 1787
train， grp_0, 6981
train, psi of (20231201, 20231210) vs (20231211, 20231220) : 0.006865345005763968
train， grp_1, 7317
train, psi of (20231211, 20231220) vs (20231221, 20231230) : 0.009459729813849346
train， grp_2, 6141
train, psi of (20231221, 20231230) vs (20231231, 20240109) : 0.013127823139186002
train， grp_3, 8531
train, psi of (20231231, 20240109) vs (20240110, 20240115) : 0.007828049642436032
train， grp_4, 4207
psiTrainTest:0.0005760361636634317
psiTrainOot:0.005445349877384797
psiTestOot:0.004563231861773121
in calc_date_ranges_effects ...
46
0 - (20231201, 20231210)
6981
6981
train eff, auc:0.5830122022987129 - ks:0.12348038434994957
1 - (20231211, 20231220)
7317
7317
train eff, auc:0.5685295841653917 - ks:0.11317502657670814
2 - (20231221, 20231230)
6141
6141
train eff, auc:0.5770228413416589 - ks:0.13156940920623417
3 - (20231231, 20240109)
8531
8531
train eff, auc:0.5763475127006654 - ks:0.12179951483024237
4 - (20240110, 20240115)
4207
4207
train eff, auc:0.5944186355189807 - ks:0.14393778507131305
in calc_date_ranges_effects ...
46
0 - (20231201, 20231210)
3000
3000
valid eff, auc:0.5780021710872205 - ks:0.12789998373575778
1 - (20231211, 20231220)
3130
3130
valid eff, auc:0.5664890184975421 - ks:0.11580292965066324
2 - (20231221, 20231230)
2667
2667
valid eff, auc:0.5367597711802116 - ks:0.07723786392253373
3 - (20231231, 20240109)
3635
3635
valid eff, auc:0.5693697954563829 - ks:0.12808634489449433
4 - (20240110, 20240115)
1787
1787
valid eff, auc:0.5423607218921647 - ks:0.10347868325673504
in calc_date_ranges_effects ...
16
0 - (20240116, 20240119)
4112
4112
oot eff, auc:0.5312422434897568 - ks:0.07453957355196567
1 - (20240120, 20240123)
3120
3120
oot eff, auc:0.5375283973324141 - ks:0.07832140656462483
2 - (20240124, 20240127)
2618
2618
oot eff, auc:0.5318790917955147 - ks:0.07070436922505452
3 - (20240128, 20240131)
2754
2754
oot eff, auc:0.5464335326950431 - ks:0.09305838608516268



>>>>>
mob3_15_flag
learning_rate=0.01
n_estimators=50
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.5769203266792239
ks2_train:0.10913099873495102
test accuracy: 0.9000632955904072
test ROC: 0.5612637233628275
ks2_test:0.10020137514889238
oot accuracy: 0.909473183116471
oot ROC: 0.5362366576210639
ks2_oot:0.05670716047661023
(0.1487, 0.1831, 0.1561, 1260, 0.1183, 1.3063, 1.3063, 0.01)
(0.1466, 0.1487, 0.1475, 1260, 0.1008, 1.1134, 1.2098, 0.0015)
(0.1449, 0.1466, 0.1458, 1260, 0.0873, 0.9644, 1.128, 0.0002)
(0.144, 0.1449, 0.1444, 1260, 0.1032, 1.1397, 1.1309, 0.0022)
(0.1425, 0.144, 0.1435, 1260, 0.0865, 0.9556, 1.0959, 0.0002)
(0.1416, 0.1425, 0.1421, 1260, 0.1071, 1.1835, 1.1105, 0.0038)
(0.1406, 0.1416, 0.1414, 1260, 0.0579, 0.64, 1.0433, 0.0191)
(0.1394, 0.1406, 0.1399, 1260, 0.119, 1.3151, 1.0772, 0.0106)
(0.1375, 0.1394, 0.1385, 1260, 0.0595, 0.6575, 1.0306, 0.017)
(0.1317, 0.1375, 0.1361, 1264, 0.0657, 0.7254, 1.0, 0.0105)
FI,S02_n65,0.13278441
FI,S02_n85,0.11937631
FI,S02_n77,0.107824594
FI,S02_n48,0.086553186
FI,S03_n65,0.052145984
FI,S03_n100,0.051619697
FI,S03_n21,0.045529798
FI,S03_n22,0.045039378
FI,S02_n73,0.0440204
FI,S03_n23,0.04249228
FI,S03_n82,0.040029466
FI,S01_n17,0.039146416
FI,S03_n44,0.03470157
FI,S01_n7,0.034154236
FI,S03_n42,0.03354142
FI,S05_n5,0.02945242
FI,S01_n3,0.023967719
FI,S01_n6,0.019326366
FI,S03_n28,0.01829442
nF:19
oot， grp_0, 4112
oot, psi of (20240116, 20240119) vs (20240120, 20240123) : 0.006836756275812191
oot， grp_1, 3120
oot, psi of (20240120, 20240123) vs (20240124, 20240127) : 0.021521732014159863
oot， grp_2, 2618
oot, psi of (20240124, 20240127) vs (20240128, 20240131) : 0.011967125645936936
oot， grp_3, 2754
test， grp_0, 3000
test, psi of (20231201, 20231210) vs (20231211, 20231220) : 0.011085323255697183
test， grp_1, 3130
test, psi of (20231211, 20231220) vs (20231221, 20231230) : 0.011419683529984428
test， grp_2, 2667
test, psi of (20231221, 20231230) vs (20231231, 20240109) : 0.01980190288766249
test， grp_3, 3635
test, psi of (20231231, 20240109) vs (20240110, 20240115) : 0.018036503518064996
test， grp_4, 1787
train， grp_0, 6981
train, psi of (20231201, 20231210) vs (20231211, 20231220) : 0.0082824773023802
train， grp_1, 7317
train, psi of (20231211, 20231220) vs (20231221, 20231230) : 0.010090315668278938
train， grp_2, 6141
train, psi of (20231221, 20231230) vs (20231231, 20240109) : 0.010940935518866437
train， grp_3, 8531
train, psi of (20231231, 20240109) vs (20240110, 20240115) : 0.00852857008505515
train， grp_4, 4207
psiTrainTest:0.0005649175571269323
psiTrainOot:0.007274169341842544
psiTestOot:0.006434012598598868
in calc_date_ranges_effects ...
46
0 - (20231201, 20231210)
6981
6981
train eff, auc:0.5816495222849738 - ks:0.11641981207198604
1 - (20231211, 20231220)
7317
7317
train eff, auc:0.5671925441480768 - ks:0.1063903326003881
2 - (20231221, 20231230)
6141
6141
train eff, auc:0.5762432535109087 - ks:0.12882514368560039
3 - (20231231, 20240109)
8531
8531
train eff, auc:0.5735329294075484 - ks:0.1191492939132206
4 - (20240110, 20240115)
4207
4207
train eff, auc:0.5913962762748268 - ks:0.12861995318193886
in calc_date_ranges_effects ...
46
0 - (20231201, 20231210)
3000
3000
valid eff, auc:0.5750399356490756 - ks:0.12842951720416412
1 - (20231211, 20231220)
3130
3130
valid eff, auc:0.5656402999577571 - ks:0.11559998720580239
2 - (20231221, 20231230)
2667
2667
valid eff, auc:0.5395489118276819 - ks:0.08108401776868757
3 - (20231231, 20240109)
3635
3635
valid eff, auc:0.5697101625030317 - ks:0.12745977847845424
4 - (20240110, 20240115)
1787
1787
valid eff, auc:0.5402776220901991 - ks:0.09977954638867093
in calc_date_ranges_effects ...
16
0 - (20240116, 20240119)
4112
4112
oot eff, auc:0.5314365644832172 - ks:0.07472899569685143
1 - (20240120, 20240123)
3120
3120
oot eff, auc:0.5376089146662656 - ks:0.08202786565183773
2 - (20240124, 20240127)
2618
2618
oot eff, auc:0.5307312996239031 - ks:0.06466638807633374
3 - (20240128, 20240131)
2754
2754
oot eff, auc:0.5477128858647279 - ks:0.09644725787534211




>>>>>
mob3_15_flag
learning_rate=0.01
n_estimators=80
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.582215737454931
ks2_train:0.11394774444590566
test accuracy: 0.9000632955904072
test ROC: 0.5637366203089218
ks2_test:0.10194365344954603
oot accuracy: 0.909473183116471
oot ROC: 0.5358766225946789
ks2_oot:0.05405716811846639
(0.1401, 0.1923, 0.1501, 1260, 0.1159, 1.28, 1.28, 0.0085)
(0.1372, 0.1401, 0.1384, 1260, 0.1079, 1.1923, 1.2362, 0.0041)
(0.135, 0.1372, 0.1359, 1260, 0.0802, 0.8855, 1.1193, 0.0017)
(0.1335, 0.135, 0.1342, 1260, 0.0873, 0.9644, 1.0805, 0.0002)
(0.1318, 0.1335, 0.1326, 1260, 0.1024, 1.1309, 1.0906, 0.002)
(0.1309, 0.1318, 0.1313, 1260, 0.0905, 0.9994, 1.0754, 0.0)
(0.1292, 0.1309, 0.1303, 1260, 0.0849, 0.9381, 1.0558, 0.0005)
(0.1275, 0.1292, 0.1281, 1260, 0.0841, 0.9293, 1.04, 0.0006)
(0.1245, 0.1275, 0.1261, 1260, 0.081, 0.8942, 1.0238, 0.0014)
(0.114, 0.1244, 0.1222, 1264, 0.0712, 0.7865, 1.0, 0.0061)
FI,S02_n65,0.0952795
FI,S02_n85,0.08110098
FI,S02_n77,0.07451281
FI,S02_n72,0.0691238
FI,S02_n48,0.06315404
FI,S03_n65,0.038797837
FI,S03_n100,0.03481872
FI,S03_n21,0.034117073
FI,S05_n1,0.033511672
FI,S02_n73,0.033306897
FI,S03_n22,0.0328303
FI,S01_n22,0.032053612
FI,S01_n11,0.03204175
FI,S03_n23,0.031810462
FI,S03_n46,0.03179108
FI,S01_n17,0.03160481
FI,S01_n23,0.031258892
FI,S03_n82,0.029967144
FI,S03_n42,0.028577253
FI,S03_n44,0.025957784
FI,S01_n7,0.025956333
FI,S05_n5,0.022638919
FI,S01_n3,0.017222233
FI,S01_n6,0.014622795
FI,S03_n39,0.014497199
FI,S03_n97,0.0143600525
FI,S03_n28,0.014032738
FI,S03_n94,0.011053317
nF:28
oot， grp_0, 4112
oot, psi of (20240116, 20240119) vs (20240120, 20240123) : 0.006367387893084981
oot， grp_1, 3120
oot, psi of (20240120, 20240123) vs (20240124, 20240127) : 0.014337978683664501
oot， grp_2, 2618
oot, psi of (20240124, 20240127) vs (20240128, 20240131) : 0.005492307560678487
oot， grp_3, 2754
test， grp_0, 3000
test, psi of (20231201, 20231210) vs (20231211, 20231220) : 0.014923248529564842
test， grp_1, 3130
test, psi of (20231211, 20231220) vs (20231221, 20231230) : 0.014031340834306738
test， grp_2, 2667
test, psi of (20231221, 20231230) vs (20231231, 20240109) : 0.019134514813470012
test， grp_3, 3635
test, psi of (20231231, 20240109) vs (20240110, 20240115) : 0.014417802305195702
test， grp_4, 1787
train， grp_0, 6981
train, psi of (20231201, 20231210) vs (20231211, 20231220) : 0.009395826111874568
train， grp_1, 7317
train, psi of (20231211, 20231220) vs (20231221, 20231230) : 0.011627466733506378
train， grp_2, 6141
train, psi of (20231221, 20231230) vs (20231231, 20240109) : 0.012844483543269582
train， grp_3, 8531
train, psi of (20231231, 20240109) vs (20240110, 20240115) : 0.006635287036201923
train， grp_4, 4207
psiTrainTest:0.0008507860159574754
psiTrainOot:0.006107469667624674
psiTestOot:0.005184034691223171
in calc_date_ranges_effects ...
46
0 - (20231201, 20231210)
6981
6981
train eff, auc:0.5862483546430035 - ks:0.12893949850471587
1 - (20231211, 20231220)
7317
7317
train eff, auc:0.5731286320981828 - ks:0.11665419704276742
2 - (20231221, 20231230)
6141
6141
train eff, auc:0.5807388281864052 - ks:0.1238802878889867
3 - (20231231, 20240109)
8531
8531
train eff, auc:0.5799888037748233 - ks:0.132357594339108
4 - (20240110, 20240115)
4207
4207
train eff, auc:0.5964904553901103 - ks:0.1514220382749717
in calc_date_ranges_effects ...
46
0 - (20231201, 20231210)
3000
3000
valid eff, auc:0.5817807706224918 - ks:0.14309507269107646
1 - (20231211, 20231220)
3130
3130
valid eff, auc:0.5706201104933377 - ks:0.12224083753464637
2 - (20231221, 20231230)
2667
2667
valid eff, auc:0.5354151353424308 - ks:0.06600460196222557
3 - (20231231, 20240109)
3635
3635
valid eff, auc:0.5714164443366481 - ks:0.12771444740884463
4 - (20240110, 20240115)
1787
1787
valid eff, auc:0.5450547397526435 - ks:0.11486754100810814
in calc_date_ranges_effects ...
16
0 - (20240116, 20240119)
4112
4112
oot eff, auc:0.5316321918362975 - ks:0.07054864491316637
1 - (20240120, 20240123)
3120
3120
oot eff, auc:0.5366327251723803 - ks:0.08143829244694173
2 - (20240124, 20240127)
2618
2618
oot eff, auc:0.5328327993685286 - ks:0.08065933045456664
3 - (20240128, 20240131)
2754
2754
oot eff, auc:0.5443414824858381 - ks:0.08262891271617159
'''