
import pandas as pd
import numpy as np

import math
from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

import pandas as pd
import numpy as np

import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns

from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

from sklearn.linear_model import LogisticRegression


from sklearn.metrics import classification_report,roc_auc_score,accuracy_score 
from xgboost.sklearn import XGBClassifier


# More standard version: KS statistic straight from the ROC curve.
from sklearn import metrics
def ks_score_V2(y_true,y_pred):
    """Return the Kolmogorov-Smirnov (KS) statistic for a binary scorer.

    Args:
        y_true: array-like of 0/1 labels.
        y_pred: array-like of predicted scores/probabilities.

    Returns:
        float: max |TPR - FPR| over all thresholds.
    """
    # BUGFIX (naming only): sklearn's roc_curve returns (fpr, tpr,
    # thresholds) in that order; the original unpacked them swapped.
    # KS = max|tpr - fpr| is symmetric, so the returned value is
    # unchanged -- the names are corrected for readability.
    fpr,tpr,threshold=metrics.roc_curve(np.array(y_true),y_pred)
    ks=abs(tpr-fpr).max()
    return ks


# Automatic binning function --- optimal (monotone) binning.
def mono_bin(Y, X, n ):
    """Quantile-bin feature X against binary target Y until monotone.

    The bucket count starts at ``n`` and shrinks until the Spearman
    correlation between per-bucket means of X and Y reaches |r| >= 0.95
    (i.e. the bucket event rate is essentially monotone in X), then WOE
    and IV statistics are computed per bucket.

    Args:
        Y: pd.Series of 0/1 targets.
        X: pd.Series of the numeric feature to bin.
        n: int, initial (maximum) number of quantile buckets to try.

    Returns:
        tuple (d4, iv, cut, woe):
            d4:  per-bucket stats DataFrame, sorted by bucket minimum.
            iv:  float, total information value of the binning.
            cut: list of bucket edges (quantiles), padded with -inf/+inf.
            woe: list of per-bucket WOE values rounded to 3 decimals.
    """
    r = 0  # initial Spearman coefficient; forces at least one loop pass
#    good=Y.sum()   # number of good customers
#    bad=Y.count()-good   # number of bad customers
    good=Y.sum()   # number of target (positive) samples
    bad=Y.count()-good  # number of non-target samples

#    seXNorm,seYNorm,seXAbnorm,seYAbnorm = split_abnorm_ptns(X,Y)

    rowN = n

  # Core of the binning: the machine picks the optimal split count here,
  # instead of us fixing the split points by hand.
    while np.abs(r) < 0.95: #1:   # exit once bucket means are monotone
#        X = seXNorm
#        Y = seYNorm

        d1 = pd.DataFrame({"X": X, "Y": Y, "Bucket": pd.qcut(X, n, duplicates="drop")}) # pd.qcut: split X into n quantile buckets; n shrinks each pass
        d2 = d1.groupby('Bucket', as_index = True,  observed=True)

        r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)    # Spearman corr of bucket means is the stopping criterion
        n = n - 1
    # Spearman is nan when qcut's duplicate-dropping leaves a single bucket.
    if str(r) == 'nan':
      n = 1

    d3 = pd.DataFrame(d2.X.min(), columns = ['min'])
    d3['min']=d2.min().X    # lower edge of each bucket (the split points)
    d3['max'] = d2.max().X  # upper edge of each bucket
    d3['sum'] = d2.sum().Y      # positives per bucket
    d3['total'] = d2.count().Y  # sample count per bucket
    d3['rate'] = d2.mean().Y    # event rate per bucket
    d3['woe']=np.log((d3['rate']/(1-d3['rate']))/(good/bad))
    d3['goodattribute']=d3['sum']/good
    d3['badattribute']=(d3['total']-d3['sum'])/bad

    rawIvs = (d3['goodattribute']-d3['badattribute'])*d3['woe']
    d3['bktIV'] = rawIvs  # per-bucket IV contribution

    iv=((d3['goodattribute']-d3['badattribute'])*d3['woe']).sum()   # total IV

#    dAbn = calc_abnorm_infos(seXAbnorm,seYAbnorm, good,bad)  # TODO
#    d3 = pd.concat([d3,dAbn],ignore_index=True)

    d4 = (d3.sort_values(by = 'min')).reset_index(drop=True)    # buckets in ascending order
    woe=list(d4['woe'].round(3))             # WOE list to return
    cut=[]    # cut holds the bucket edge points
    cut.append(float('-inf'))  # prepend -inf
    # n interior quantile points -> n+1 bucket intervals (n is the last
    # accepted split count after the loop's final decrement).
    for i in range(1,n+1):
         qua=X.quantile(i/(n+1))     # quantile value used as the bucket edge
         cut.append(round(qua,4))   # keep 4 decimal places
    cut.append(float('inf')) # append +inf
    return d4,iv,cut,woe


def bin_frequency(x,y,n=10): # x: variable to bin; y: target; n: bucket count
    """Equal-frequency binning of x against binary target y with WOE/IV.

    Args:
        x: pd.Series, numeric feature to bin.
        y: pd.Series, 0/1 target (1 == "bad").
        n: number of quantile buckets (ignored when x is binary-valued).

    Returns:
        tuple (IV, cut, WOEi, d4):
            IV:   float, total information value of the variable.
            cut:  bucket lower edges padded with -inf/+inf.
            WOEi: per-bucket WOE rounded to 3 decimals.
            d4:   per-bucket stats DataFrame sorted by bucket minimum.
    """
    total = y.count()       #1 total sample count
    bad = y.sum()           #2 bad-sample count
    good = total-bad        #3 good-sample count
    if x.value_counts().shape[0]==2:    #4 binary-valued variable: only two buckets
        d1 = pd.DataFrame({'x':x,'y':y,'bucket':pd.cut(x,2)})
    else:
        d1 = pd.DataFrame({'x':x,'y':y,'bucket':pd.qcut(x,n,duplicates='drop')})  #5 equal-frequency binning via pd.qcut
#        d1 = pd.DataFrame({'x':x,'y':y,'bucket':pd.cut(x,n,duplicates='drop')})
    d2 = d1.groupby('bucket',as_index=True)  #6 aggregate by bucket
    d3 = pd.DataFrame(d2.x.min(),columns=['min_bin'])
    d3['min_bin'] = d2.x.min()               #7 bucket lower edge
    d3['max_bin'] = d2.x.max()               #8 bucket upper edge
    d3['bad'] = d2.y.sum()                   #9 bad samples in each bucket
    d3['total'] = d2.y.count()               #10 total samples in each bucket
    d3['bad_rate'] = d3['bad']/d3['total']   #11 bad rate within the bucket
    d3['badattr'] = d3['bad']/bad            #12 bucket share of all bad samples
    d3['goodattr'] = (d3['total'] - d3['bad'])/good    #13 bucket share of all good samples
    d3['WOEi'] = np.log(d3['badattr']/d3['goodattr'])  #14 per-bucket WOE value
    IV = ((d3['badattr']-d3['goodattr'])*d3['WOEi']).sum()  #15 total IV of the variable
    d3['IVi'] = (d3['badattr']-d3['goodattr'])*d3['WOEi']   #16 per-bucket IV contribution
    d4 = (d3.sort_values(by='min_bin')).reset_index(drop=True) #17 sort buckets ascending by lower edge
    cut = []
    cut.append(float('-inf'))
    for i in d4.min_bin:
        cut.append(i)
    cut.append(float('inf'))
    WOEi = list(d4['WOEi'].round(3))
    return IV,cut,WOEi,d4


def cal_psi(actual, predict, bins=10):
    """Population Stability Index (PSI) between two score distributions.

    Bucket edges are equal-width over the range of *actual* (with open
    -inf/+inf end buckets); *predict* is counted against the same edges.

    Args:
        actual: Array/Series of reference scores (e.g. train predictions).
        predict: Array/Series of comparison scores (e.g. test predictions).
        bins: number of interior equal-width segments.

    Returns:
        tuple (psi, psi_df):
            psi: float, total PSI.
            psi_df: DataFrame with per-bucket counts, rates and PSI terms.
    """
    lo = actual.min()
    hi = actual.max()
    step = (hi - lo) / bins

    # Equal-width interior edges plus open-ended first/last buckets.
    edges = [-float("inf")]
    edges.extend(lo + k * step for k in range(1, bins))
    edges.append(float("inf"))

    ref_counts, _ = np.histogram(actual, bins=edges)
    cmp_counts, _ = np.histogram(predict, bins=edges)

    psi_df = pd.merge(
        pd.DataFrame(ref_counts, columns=['actual']),
        pd.DataFrame(cmp_counts, columns=['predict']),
        right_index=True,
        left_index=True,
    )
    # +1 in the numerator guards against log(0) / division-by-zero terms.
    psi_df['actual_rate'] = (psi_df['actual'] + 1) / psi_df['actual'].sum()
    psi_df['predict_rate'] = (psi_df['predict'] + 1) / psi_df['predict'].sum()
    psi_df['psi'] = (psi_df['actual_rate'] - psi_df['predict_rate']) * np.log(
        psi_df['actual_rate'] / psi_df['predict_rate'])
    return psi_df['psi'].sum(), psi_df


################

def stat_group_labs_rate(vSL): #, numAllY0,numAllY1):
  """Summarize one score bucket.

  Args:
      vSL: list of ((score, sample_index), label) pairs; label > 0 marks
           a positive sample.

  Returns:
      tuple (minScore, maxScore, avgScore, rateLab1, numLab1, numLabs).

  Raises:
      ValueError/ZeroDivisionError: if vSL is empty (the original also
      crashed on empty input, via division by zero).
  """
  vScores = [kv[0][0] for kv in vSL]
  numLabs = len(vSL)
  numLab1 = sum(1 for kv in vSL if kv[1] > 0)

  # BUGFIX: min/max were previously accumulated from the sentinels
  # 100000 / -100, which silently produced wrong extremes for scores
  # outside (-100, 100000). Use the true extremes instead.
  minScore = min(vScores)
  maxScore = max(vScores)

  rateLab1 = numLab1 / numLabs
  avgScore = sum(vScores) / numLabs

  return minScore, maxScore, avgScore, rateLab1, numLab1, numLabs

def calc_cur_group_iv(numAllY0, numAllY1, numCurY0, numCurY1):
  """Return the IV contribution (IVi) of a single score bucket.

  Args:
      numAllY0: global count of negative (label 0) samples.
      numAllY1: global count of positive (label 1) samples.
      numCurY0: negatives inside this bucket.
      numCurY1: positives inside this bucket.

  Returns:
      float: IVi = (badattr - goodattr) * ln(badattr / goodattr).
      May be +/-inf or nan when a bucket count is zero (np.log warns).
  """
  allTotal = numAllY0 + numAllY1
  curTotal = numCurY0 + numCurY1

  # bad_rate is not used in the result but is kept so that an empty
  # bucket (curTotal == 0) still raises exactly as before.
  bad_rate = numCurY1 / curTotal

  badattr = numCurY1 / numAllY1   # bucket share of all positives
  goodattr = numCurY0 / numAllY0  # bucket share of all negatives
  woe_i = np.log(badattr / goodattr)

  return (badattr - goodattr) * woe_i


def calc_score_labs_bins_by_us(scores, labs, numGroups=10):  # equal-frequency bins (same count per bucket)
  """Equal-frequency score-bucket report over (score, label) pairs.

  Scores are sorted descending; the first numGroups-1 buckets hold
  floor(len/numGroups) items each and the last bucket takes the
  remainder.

  Args:
      scores: sequence of model scores.
      labs: sequence of 0/1 labels aligned with scores.
      numGroups: number of buckets.

  Returns:
      list of per-bucket tuples
      (min, max, avg, size, rateLab1, lift, cumLift, IVi), each value
      rounded to 4 decimal places.
  """
  numItems = len(scores)
  numInGrp = int(numItems / numGroups)

  # Key by (score, original index) so duplicate scores stay distinct.
  dScoreLab = {(s, i): l for i, (s, l) in enumerate(zip(scores, labs))}
  numAllLabs = numItems
  numAllLab1 = sum(1 for l in labs if l == 1)
  numAllLab0 = numAllLabs - numAllLab1
  rateAllLab1 = numAllLab1 / numAllLabs  # global positive rate

  # Sort descending by score (index breaks ties deterministically).
  sSls = sorted(dScoreLab.items(), key=lambda x: (x[0][0],x[1]),reverse=True)

  # REFACTOR: the original duplicated the whole per-bucket stats block
  # for the final remainder bucket; build the boundary list once and run
  # a single loop over all buckets instead.
  bounds = [(g * numInGrp, (g + 1) * numInGrp) for g in range(numGroups - 1)]
  bounds.append(((numGroups - 1) * numInGrp, numItems))

  vRes = []
  sumLabs = 0
  sumLab1 = 0
  for begIdx, endIdx in bounds:
    vCurRng = sSls[begIdx : endIdx]
    minScore, maxScore, avgScore, rateLab1, numLab1, numLabs = stat_group_labs_rate(vCurRng)
    sumLabs += numLabs
    sumLab1 += numLab1
    curLift = rateLab1/rateAllLab1        # bucket lift vs global rate
    cumRateLab1 = sumLab1 / sumLabs
    cumLift = cumRateLab1 / rateAllLab1   # cumulative lift down to this bucket

    curIv = calc_cur_group_iv(numAllLab0, numAllLab1, numLabs-numLab1, numLab1)

    vRes.append((round(minScore,4), round(maxScore,4), round(avgScore,4), len(vCurRng), round(rateLab1,4), round(curLift,4), round(cumLift,4), round(curIv,4)))

  return vRes
  
  

def calc_score_labs_bins_by_us_v2(scores, labs, numGroups=10): # equal-width binning (same bucket width)
  """Equal-width score-bucket report over (score, label) pairs.

  Scores are sorted descending; bucket edges are equal-width over
  [minScore, maxScore] and each bucket is summarized with min/max/avg
  score, positive rate, lift, cumulative lift and IV contribution.

  NOTE(review): an empty equal-width bucket would make
  stat_group_labs_rate divide by zero -- confirm the score distribution
  has no gaps before relying on this. Debug prints are left in place.

  Args:
      scores: sequence of model scores.
      labs: sequence of 0/1 labels aligned with scores.
      numGroups: number of equal-width buckets.

  Returns:
      list of per-bucket tuples
      (min, max, avg, size, rateLab1, lift, cumLift, IVi), rounded to 4 dp.
  """
  vRes = []

#  print("len(scores):" + str(len(scores)))
#  print("len(labs):" + str(len(labs)))
  numItems = len(scores)
#  numInGrp = int(numItems / numGroups)
#  print("numInGrp:" + str(numInGrp))

  # Key by (score, original index) so duplicate scores stay distinct.
  dScoreLab = {}
  numAllLab1 = 0
  numAllLabs = numItems
  sIdx = 0
  for l in labs:
    s = scores[sIdx]
    dScoreLab[(s, sIdx)] = l
    if l == 1:
      numAllLab1 += 1
    sIdx += 1

  numAllLab0 = numAllLabs - numAllLab1
#  print("numAllLab1:" + str(numAllLab1))
#  print("numAllLabs:" + str(numAllLabs))
#  print("numAllLab0:" + str(numAllLab0))

  rateAllLab1 = numAllLab1 / numAllLabs  # global positive rate
#  print("rateAllLab1:" + str(rateAllLab1))

  # Sort descending by score (index breaks ties deterministically).
  sSls = sorted(dScoreLab.items(), key=lambda x: (x[0][0],x[1]),reverse=True)
#  print("sSls:" +str(sSls))
  # sSls example: [((0.14229898, 26025), 0), ..., ((0.07274652, 16943), 0)]
  vsScores = []
  for kv in sSls:
    vsScores.append(kv[0][0])

  maxScore = sSls[0][0][0]
  minScore = sSls[-1][0][0]
  print("maxScore:" + str(maxScore))
  print("minScore:" + str(minScore))
  lenBin = (maxScore-minScore) / numGroups
  print("lenBin:" + str(lenBin))
  cuts = [minScore + i * lenBin for i in range(1, numGroups)]# equal-width interior edges
  cuts.insert(0, -float("inf"))
  cuts.append(float("inf"))
  print("cuts:" + str(cuts))
  actual_cuts = np.histogram(vsScores, bins=cuts)# counts per ascending bucket
#  predict_cuts = np.histogram(predict, bins=cuts)
  print("actual_cuts:" + str(actual_cuts))
  print(type(actual_cuts))
  vGroupsLen = actual_cuts[0].tolist()
  # Histogram buckets ascend while sSls descends; reverse the counts so
  # group 0 is the highest-score bucket.
  vGroupsLen = vGroupsLen[::-1]

  sIdx = 0
  sumLabs = 0
  sumLab1 = 0
  for gIdx in range(len(vGroupsLen)): #numGroups-1):
    cGrpNum = vGroupsLen[gIdx]  # bucket size taken from the histogram
    begIdx = sIdx
    endIdx = sIdx + cGrpNum
#    print("cur rng:" + str(begIdx) + "," + str(endIdx))

    vCurRng = sSls[begIdx : endIdx]
    minScore, maxScore, avgScore, rateLab1, numLab1, numLabs = stat_group_labs_rate(vCurRng)
    sumLabs += numLabs
    sumLab1 += numLab1
    curLift = rateLab1/rateAllLab1        # bucket lift vs global rate
    cumRateLab1 = sumLab1 / sumLabs
    cumLift = cumRateLab1 / rateAllLab1   # cumulative lift down to this bucket

    curIv = calc_cur_group_iv(numAllLab0, numAllLab1, numLabs-numLab1, numLab1)

    vRes.append((round(minScore,4), round(maxScore,4), round(avgScore,4), len(vCurRng), round(rateLab1,4), round(curLift,4), round(cumLift,4), round(curIv,4)))

    sIdx += cGrpNum

  return vRes
  


def calc_date_ranges_effects(serDt, vPreds, vLabs, nDaysInGroup, tag, fOut):
  """Print AUC/KS per consecutive block of nDaysInGroup distinct dates.

  Args:
      serDt: iterable of date keys (one per prediction), e.g. 20240113.
      vPreds: predicted probabilities aligned with serDt.
      vLabs: 0/1 labels aligned with serDt.
      nDaysInGroup: number of distinct dates pooled into one group.
      tag: label prefix for the printed report lines.
      fOut: unused here (kept for signature parity with
            calc_oot_preds_psis); all output goes to stdout.

  Returns:
      None.
  """
  print("in calc_date_ranges_effects ...")
  # Collect (prediction, label) pairs per distinct date.
  dDtFreq = {}
  pIdx = 0
  for dt in serDt:
    cP = vPreds[pIdx]
    cL = vLabs[pIdx]
    if dt not in dDtFreq:
      dDtFreq[dt] = []
    dDtFreq[dt].append((cP,cL)) #= dDtFreq.get(dt, 0) + 1
    pIdx += 1
  sDtFreqs = sorted(dDtFreq.items(), key=lambda x: x[0],reverse=False)
  print(len(sDtFreqs))

  # Pool consecutive dates into groups of nDaysInGroup days each.
  dIdx = 0
  dGrpPreds = {}
  dGrpDts = {}
  for kv in sDtFreqs:
#    print(tag+" dt f:" + str(kv[0]) + ", " + str(len(kv[1])))
    cGrpIdx = math.floor(dIdx/nDaysInGroup)
    if cGrpIdx not in dGrpPreds:
      dGrpPreds[cGrpIdx] = ([],[])   # (predictions, labels)
      dGrpDts[cGrpIdx] = []

    for pp in kv[1]: # [0]:
      dGrpPreds[cGrpIdx][0].append(pp[0])
      dGrpPreds[cGrpIdx][1].append(pp[1])
    dGrpDts[cGrpIdx].append(kv[0])
    dIdx += 1

  # Report AUC and KS for each date-range group.
  vGrpDtRng = []
  sGrpFreqs = sorted(dGrpPreds.items(), key=lambda x: x[0],reverse=False)
  for kv in sGrpFreqs:
    cGIdx = kv[0]
    dtRng = (dGrpDts[cGIdx][0], dGrpDts[cGIdx][-1])
    print(str(cGIdx) + " - " + str(dtRng))
    vCPreds = kv[1][0]
    vCLabs = kv[1][1]
    print(len(vCPreds))
    print(len(vCLabs))

    cAUC = roc_auc_score(pd.Series(vCLabs), pd.Series(vCPreds))
    cKS = ks_score_V2(pd.Series(vCLabs), pd.Series(vCPreds))
    print(tag + ", auc:" + str(cAUC) + " - ks:" + str(cKS))
  

def calc_oot_preds_psis(serOotDt, serPreds, nDaysInGroup, tag,fOut):
  """Report the PSI between consecutive date-range prediction groups.

  Dates are pooled nDaysInGroup at a time (ascending); for each adjacent
  pair of groups, the PSI of their score distributions is computed with
  cal_psi and printed (and optionally written to fOut).

  Args:
      serOotDt: iterable of date keys aligned with serPreds.
      serPreds: predicted probabilities.
      nDaysInGroup: number of distinct dates pooled into one group.
      tag: label prefix for report lines.
      fOut: optional open file handle; when not None the report lines
            are also written there.

  Returns:
      None.
  """
  # Collect predictions per distinct date.
  dDtFreq = {}
  pIdx = 0
  for dt in serOotDt:
    cP = serPreds[pIdx]
    if dt not in dDtFreq:
      dDtFreq[dt] = []
    dDtFreq[dt].append(cP) #= dDtFreq.get(dt, 0) + 1
    pIdx += 1
  sDtFreqs = sorted(dDtFreq.items(), key=lambda x: x[0],reverse=False)

  # Pool consecutive dates into groups of nDaysInGroup days each.
  dIdx = 0
  dGrpPreds = {}
  dGrpDts = {}
  for kv in sDtFreqs:
#    print(tag+" dt f:" + str(kv[0]) + ", " + str(len(kv[1])))
    cGrpIdx = math.floor(dIdx/nDaysInGroup)
    if cGrpIdx not in dGrpPreds:
      dGrpPreds[cGrpIdx] = []
      dGrpDts[cGrpIdx] = []
    dGrpPreds[cGrpIdx].append(kv[1])  # list of per-date prediction lists
    dGrpDts[cGrpIdx].append(kv[0])
    dIdx += 1

  # Flatten each group's per-date lists into one prediction list.
  vGrpPreds = []
  vGrpDtRng = []
  sGrpFreqs = sorted(dGrpPreds.items(), key=lambda x: x[0],reverse=False)
  #for gIdx in dGrpPreds:
  for kv in sGrpFreqs:
    gIdx = kv[0]
    cGDtsPreds = dGrpPreds[gIdx]
    cFInfo = ""
    cGPreds = []
    for cv in cGDtsPreds:
      cFInfo = cFInfo + str(len(cv)) + ";"
      for cp in cv:
        cGPreds.append(cp)
#    print(tag + ", " +str(gIdx) + ", " + cFInfo)
    vGrpPreds.append(cGPreds)
    vGrpDtRng.append((dGrpDts[gIdx][0], dGrpDts[gIdx][-1]))

  # PSI of each group against the following one.
  # NOTE(review): the group separator below is a full-width comma ("，");
  # kept byte-identical in case downstream log parsing relies on it.
  for gIdx in range(len(vGrpPreds)):
    print(tag + "， grp_" + str(gIdx) + ", " + str(len(vGrpPreds[gIdx])))
    if fOut is not None:
      fOut.write(tag + "， grp_" + str(gIdx) + ", " + str(len(vGrpPreds[gIdx])) + "\n")
    if gIdx < len(vGrpPreds)-1:
      cGPs = vGrpPreds[gIdx]
      nGPs = vGrpPreds[gIdx+1]
      cGRng = vGrpDtRng[gIdx]
      nGRng = vGrpDtRng[gIdx+1]
      psi,psidf = cal_psi(pd.Series(cGPs), pd.Series(nGPs))
      print(tag + ", psi of " + str(cGRng)+" vs " + str(nGRng) + " : " + str(psi))
      if fOut is not None:
        fOut.write(tag + ", psi of " + str(cGRng)+" vs " + str(nGRng) + " : " + str(psi) + "\n")




# --- Load the raw sample workbook and the model feature list -------------
rawPtnsPath = "../恒昌12万_青禾苗.xlsx"

pdData = pd.read_excel(rawPtnsPath)
# Year-month key derived from the apply date, e.g. 20240612 -> '202406'.
pdData['mn'] = pdData['apply_date'].apply(lambda x:str(x)[:6])


featPath = "D:/yrProj/20240612_众安15w/青禾苗-幸福消金-20240603_report.xlsx/银融_青禾苗_三版模型/银融_青禾苗_入模特征_子分1.1.txt"

# FIX (resource leak): read the feature list inside a context manager so
# the file handle is closed deterministically; the original iterated
# `open(featPath)` directly and never closed it. Blank lines are skipped.
vFeats = []
with open(featPath) as fFeat:
  for f in fFeat:
    f = f.strip()
    if len(f) > 0:
      vFeats.append(f)




print("vFeats:")
print(vFeats)
print(len(vFeats))


#colLab = 'y1'
colLab = 'y2'   # active target column; y1/y3 alternatives kept for reruns
#colLab = 'y3'

# Output paths for predicted probabilities (12w full sample / 49k blind test).
ptnProxPath = "./预估分/w12_" + colLab +"_prox"
testProxPath = "./预估分/k49_" + colLab +"_prox"



# Train/valid/oot splits are pre-assigned in the 'flag' column.
pdPtn_train = pdData[pdData['flag'] == 'train']
if colLab == 'y3':
  # NOTE(review): assumes negative y3 marks an undefined label, so only
  # labelled rows are kept when training on y3 -- confirm with data owner.
  pdPtn_train = pdPtn_train[pdPtn_train['y3']>=0]
  
pdPtn_valid = pdData[pdData['flag'] == 'valid']
if colLab == 'y3':
  pdPtn_valid = pdPtn_valid[pdPtn_valid['y3']>=0]

pdPtn_oot = pdData[pdData['flag'] == 'oot']
if colLab == 'y3':
  pdPtn_oot = pdPtn_oot[pdPtn_oot['y3']>=0]


# Sanity check: label distribution of y3 in each split.
print("pdPtn_train.y3.value_counts():")
print(pdPtn_train.y3.value_counts())
print("pdPtn_valid.y3.value_counts():")
print(pdPtn_valid.y3.value_counts())
print("pdPtn_oot.y3.value_counts():")
print(pdPtn_oot.y3.value_counts())


# Tuned hyper-parameters (from an earlier parameter search); the commented
# line keeps an alternative setting that was also evaluated.
dParams = {'learning_rate':0.02, 'n_estimators':120, 'max_depth':3, 'min_child_weight':100}
#dParams = {'learning_rate':0.02, 'n_estimators':100, 'max_depth':3, 'min_child_weight':60}

if True:
  learning_rate = dParams['learning_rate']
  n_estimators = dParams['n_estimators']
  max_depth = dParams['max_depth']
  min_child_weight = dParams['min_child_weight']
  
  # Echo the run configuration.
  print(">>>>>")
  print(colLab)
#  print(vFeats)
  print("learning_rate="+str(learning_rate))
  print("n_estimators="+str(n_estimators))
  print("max_depth="+str(max_depth))
  print("min_child_weight="+str(min_child_weight))

  train_x = pdPtn_train[vFeats]
  train_y = pdPtn_train[colLab]
  test_x = pdPtn_valid[vFeats]
  test_y = pdPtn_valid[colLab]


  # Rebuild the classifier with the tuned parameters.
  xgb1 = XGBClassifier(
   learning_rate =learning_rate,
   n_estimators=n_estimators,
   max_depth=max_depth,
   min_child_weight=min_child_weight, #gamma=2.0, reg_lambda=2.0, reg_alpha=2.0, # subsample=0.8, colsample_bytree=0.8, 
#   eta=0.01,
   objective= 'binary:logistic',
   nthread=4,
#   scale_pos_weight=1,
   seed=27)


  print("参数寻优后训练：")
  xgb1.fit(train_x,train_y) 
  # Hard class predictions for accuracy; probabilities for AUC/KS/PSI.
  prex=xgb1.predict(test_x) 
  prox=xgb1.predict_proba(test_x)[:,1]
  prox_train=xgb1.predict_proba(train_x)[:,1]

  # Train-set metrics.
  train_prox=xgb1.predict_proba(train_x)[:,1]
  print('train ROC: {}'.format(roc_auc_score(train_y,train_prox)))
  ks2_train = ks_score_V2(train_y,prox_train)
  print("ks2_train:" + str(ks2_train))


  # Validation-set metrics.
  print('test accuracy: {}'.format(accuracy_score(test_y,prex))) 
  print('test ROC: {}'.format(roc_auc_score(test_y,prox)))
  ks2_test = ks_score_V2(test_y,prox)
  print("ks2_test:" + str(ks2_test))


  # Out-of-time metrics.
  #prex_oot = xgb1.predict(pdPtn_oot.drop(toDrop,axis=1)) 
  #prox_oot = xgb1.predict_proba(pdPtn_oot.drop(toDrop,axis=1))[:,1]
  prex_oot = xgb1.predict(pdPtn_oot[vFeats]) 
  prox_oot = xgb1.predict_proba(pdPtn_oot[vFeats])[:,1]
  print('oot accuracy: {}'.format(accuracy_score(pdPtn_oot[colLab], prex_oot))) 
  print('oot ROC: {}'.format(roc_auc_score(pdPtn_oot[colLab], prox_oot)))
  ks2_oot = ks_score_V2(pdPtn_oot[colLab],prox_oot)
  print("ks2_oot:" + str(ks2_oot))


  # Equal-frequency score-bucket report on the OOT set.
  vSLStats = calc_score_labs_bins_by_us(prox_oot, pdPtn_oot[colLab], numGroups=10)
  for sl in vSLStats:
    print(sl)


  # Feature importances, sorted descending by weight.
  fi = xgb1.feature_importances_
  tc = list(train_x.columns)

  dFws = {}
  for fIdx in range(len(fi)):
    cc = tc[fIdx]
    cw = fi[fIdx]
    dFws[cc] = cw

  sFMWs = sorted(dFws.items(), key=lambda x: x[1],reverse=True)

  # Record the features that actually contributed (importance > 0).
  nF = 0
  for kv in sFMWs:
    ft = kv[0]
    wgt = kv[1]
    if wgt > 0.0:
      print("FI," + ft + "," + str(wgt))
      nF += 1
  print("nF:" + str(nF))
  
  # Score-stability (PSI) over time within each split.
  calc_oot_preds_psis(pdPtn_oot['apply_date'], prox_oot, 4, "oot", None)

  calc_oot_preds_psis(pdPtn_valid['apply_date'], prox, 10, "test", None)
  calc_oot_preds_psis(pdPtn_train['apply_date'], prox_train, 10, "train", None)
#  calc_oot_preds_psis(pdData['apply_date'], prox_train, 10, None)

  # Cross-split PSIs of the score distributions.
  psiTrainTest,_1 = cal_psi(pd.Series(prox_train), pd.Series(prox))
  psiTrainOot,_2 = cal_psi(pd.Series(prox_train), pd.Series(prox_oot))
  psiTestOot,_3 = cal_psi(pd.Series(prox), pd.Series(prox_oot))
  print("psiTrainTest:" + str(psiTrainTest))
  print("psiTrainOot:" + str(psiTrainOot))
  print("psiTestOot:" + str(psiTestOot))
  
  # AUC/KS per date-range within each split.
  calc_date_ranges_effects(pdPtn_train['apply_date'], prox_train.tolist(), pdPtn_train[colLab].tolist(), 10, "train eff", None)
  calc_date_ranges_effects(pdPtn_valid['apply_date'], prox.tolist(), pdPtn_valid[colLab].tolist(), 10, "valid eff", None)
  calc_date_ranges_effects(pdPtn_oot['apply_date'], prox_oot.tolist(), pdPtn_oot[colLab].tolist(), 4, "oot eff", None)



# --- Score the full sample and the blind-test set, dump probabilities ----
# pdData -> ptnProxPath
# D:/yrProj/20240615_恒昌/恒昌样本/盲测49k_青禾苗特征.xlsx -> testProxPath

# Positive-class probability for every row of the full sample.
prox_ptn = xgb1.predict_proba(pdData[vFeats])[:,1]
# Idiom: context managers guarantee the output files are flushed and
# closed even if a write fails (the original closed them manually).
with open(ptnProxPath, 'w') as fOutPtn:
  for pp in prox_ptn:
    fOutPtn.write(str(pp) + "\n")

# Score the blind-test workbook with the same feature list.
pdTest = pd.read_excel("D:/yrProj/20240615_恒昌/恒昌样本/盲测49k_青禾苗特征.xlsx") #, encoding='utf-8')
prox_test = xgb1.predict_proba(pdTest[vFeats])[:,1]
with open(testProxPath, 'w') as fOutTest:
  for pp in prox_test:
    fOutTest.write(str(pp) + "\n")


'''
>>>>>
y2   <<<<<<<<<<<<<<<<
learning_rate=0.02
n_estimators=120
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.6337795623482269
ks2_train:0.18988452187959892
test accuracy: 0.9265586377368855
test ROC: 0.6059427471418266
ks2_test:0.1547575002274691
oot accuracy: 0.935927204267336
oot ROC: 0.5976965864492172
ks2_oot:0.1382134489125179
(0.0971, 0.2311, 0.1105, 3187, 0.118, 1.8413, 1.8413, 0.0602)
(0.0878, 0.0971, 0.0921, 3187, 0.0744, 1.1606, 1.501, 0.0027)
(0.0821, 0.0878, 0.0847, 3187, 0.0762, 1.19, 1.3973, 0.0038)
(0.0785, 0.0821, 0.0802, 3187, 0.0687, 1.0725, 1.3161, 0.0006)
(0.0755, 0.0785, 0.077, 3187, 0.0631, 0.9843, 1.2498, 0.0)
(0.0721, 0.0755, 0.0738, 3187, 0.059, 0.9207, 1.1949, 0.0007)
(0.0685, 0.0721, 0.0704, 3187, 0.0574, 0.8962, 1.1522, 0.0013)
(0.064, 0.0685, 0.0664, 3187, 0.0455, 0.7101, 1.097, 0.0112)
(0.0569, 0.064, 0.0608, 3187, 0.0474, 0.7395, 1.0572, 0.0089)
(0.04, 0.0569, 0.0523, 3187, 0.0311, 0.4848, 1.0, 0.0418)
FI,S02_n50,0.03784407
FI,S01_n23,0.029600581
FI,S02_n55,0.02707062
FI,S03_n23,0.025489047
FI,S03_n43,0.024172643
FI,S02_n62,0.024109773
FI,S03_n92,0.023814728
FI,S02_n48,0.022935735
FI,S03_n102,0.021958653
FI,S03_n75,0.020962324
FI,S02_n73,0.020593699
FI,S02_n82,0.019605689
FI,S03_n100,0.019174898
FI,S03_n83,0.018556349
FI,S02_n88,0.018246474
FI,S03_n87,0.017629948
FI,S01_n1,0.016708817
FI,S01_n17,0.016704341
FI,S03_n74,0.01667314
FI,S02_n83,0.016623216
FI,S03_n48,0.016554851
FI,S02_n53,0.016402615
FI,S02_n40,0.016377978
FI,S01_n22,0.016102951
FI,S03_n53,0.015533793
FI,S05_n2,0.015506904
FI,S03_n65,0.015162113
FI,S03_n55,0.015046916
FI,S02_n85,0.01481002
FI,S05_n1,0.014700343
FI,S03_n26,0.01454785
FI,S03_n27,0.014451711
FI,S03_n69,0.014084819
FI,S03_n104,0.014025898
FI,S05_n12,0.013604164
FI,S03_n70,0.013413531
FI,S02_n64,0.013238367
FI,S03_n94,0.013160002
FI,S02_n65,0.012444567
FI,S01_n8,0.012428086
FI,S04_n14,0.012337712
FI,S03_n51,0.012084078
FI,S02_n74,0.011745431
FI,S03_n73,0.011583074
FI,S03_n97,0.0113700135
FI,S03_n82,0.011013217
FI,S03_n29,0.010937994
FI,S05_n7,0.01084211
FI,S03_n20,0.010764088
FI,S03_n42,0.010728308
FI,S02_n70,0.010638839
FI,S03_n86,0.010006996
FI,S02_n84,0.009636185
FI,S01_n11,0.009186215
FI,S03_n4,0.009104986
FI,S01_n21,0.009084664
FI,S02_n44,0.009069759
FI,S03_n80,0.008707547
FI,S01_n2,0.008528818
FI,S02_n69,0.008254216
FI,S03_n84,0.008130507
FI,S05_n8,0.007347282
FI,S03_n50,0.0067779706
FI,S02_n79,0.0065958896
FI,S01_n9,0.006435318
FI,S04_n4,0.0056555206
FI,S03_n105,0.0055891722
FI,S02_n41,0.0048474036
FI,S03_n62,0.0034372984
FI,S04_n3,0.0033871003
FI,S03_n49,0.0031870771
FI,S01_n4,0.0029129512
nF:72
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.005321836240209145
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.0031111754422442142
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.0031506842870256393
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.022703029047294952
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.003680641630555113
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.0065073842854661045
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.024440697010061102
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.00811227791599596
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.009493540038059944
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.07849961379146078
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.00391349837035567
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.0037324854844311304
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.007317673988509445
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.005377011079783465
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.004031072029911397
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.015910933755837423
train， grp_6, 999
psiTrainTest:0.0005620339333991199
psiTrainOot:0.004282674664962277
psiTestOot:0.003119731753930208
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6363392730366024 - ks:0.2074339881051876
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.6425499582211336 - ks:0.19824151554188657
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6270373962346469 - ks:0.18289643694155322
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6231032012118697 - ks:0.18016483610630385
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6494542118032094 - ks:0.23080638553886923
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6213013129676118 - ks:0.18226934782720855
6 - (20240112, 20240112)
999
999
train eff, auc:0.6501074113856069 - ks:0.27103999494534653
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6143002087198516 - ks:0.17878787878787877
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6264286862154123 - ks:0.18767156533175045
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.5883770664650765 - ks:0.139936737820041
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.5964568356833551 - ks:0.1860662556795154
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6305864691415235 - ks:0.19579945308898528
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5782829337254352 - ks:0.130024426834324
6 - (20240112, 20240112)
249
249
valid eff, auc:0.529774678111588 - ks:0.1330472103004292
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.6021909765999793 - ks:0.1472760108158685
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6185242127705013 - ks:0.1823702338960086
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5799987397236086 - ks:0.11863749969251275
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5891943045940843 - ks:0.13998615481434862
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6041852819311836 - ks:0.1673572329310034




>>>>>
y2
learning_rate=0.02
n_estimators=100
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.628127163978609
ks2_train:0.18179149773595088
test accuracy: 0.9265586377368855
test ROC: 0.6038760154299414
ks2_test:0.15400060019910866
oot accuracy: 0.935927204267336
oot ROC: 0.5937857542893326
ks2_oot:0.13589847873482136
(0.0996, 0.2248, 0.1118, 3187, 0.118, 1.8413, 1.8413, 0.0602)
(0.0911, 0.0996, 0.095, 3187, 0.0734, 1.1459, 1.4936, 0.0023)
(0.0856, 0.0911, 0.0882, 3187, 0.0703, 1.097, 1.3614, 0.001)
(0.0823, 0.0856, 0.0838, 3187, 0.075, 1.1704, 1.3137, 0.0031)
(0.0796, 0.0823, 0.081, 3187, 0.0577, 0.9011, 1.2311, 0.0012)
(0.0765, 0.0796, 0.0781, 3187, 0.059, 0.9207, 1.1794, 0.0007)
(0.0729, 0.0765, 0.0748, 3187, 0.0596, 0.9305, 1.1438, 0.0006)
(0.0687, 0.0729, 0.0709, 3187, 0.048, 0.7493, 1.0945, 0.0082)
(0.0613, 0.0687, 0.0652, 3187, 0.048, 0.7493, 1.0562, 0.0082)
(0.0473, 0.0613, 0.0572, 3187, 0.0317, 0.4946, 1.0, 0.0399)
FI,S02_n50,0.043524895
FI,S01_n23,0.036102865
FI,S02_n55,0.033671424
FI,S03_n23,0.030294474
FI,S03_n102,0.028373139
FI,S03_n43,0.027801234
FI,S02_n48,0.027797114
FI,S02_n62,0.027728925
FI,S03_n92,0.027389592
FI,S03_n75,0.024416722
FI,S03_n100,0.023163369
FI,S02_n82,0.022548728
FI,S03_n83,0.021341868
FI,S02_n88,0.020985479
FI,S02_n83,0.020711541
FI,S03_n87,0.020537594
FI,S02_n53,0.020092309
FI,S03_n74,0.019186957
FI,S01_n22,0.019104706
FI,S01_n17,0.01908654
FI,S03_n48,0.019039925
FI,S05_n2,0.01869592
FI,S03_n55,0.018424997
FI,S02_n85,0.018214976
FI,S02_n40,0.017515527
FI,S05_n1,0.016703961
FI,S03_n69,0.016536484
FI,S03_n27,0.016453281
FI,S03_n104,0.016131345
FI,S03_n51,0.015621558
FI,S02_n64,0.01545975
FI,S03_n70,0.015427055
FI,S01_n8,0.014722883
FI,S02_n65,0.014312639
FI,S03_n94,0.014248631
FI,S03_n82,0.014218637
FI,S04_n14,0.013466897
FI,S03_n73,0.013321825
FI,S02_n74,0.012612369
FI,S03_n29,0.012579912
FI,S05_n12,0.01254987
FI,S03_n86,0.012432917
FI,S03_n20,0.0123799
FI,S02_n84,0.012262589
FI,S01_n21,0.011391964
FI,S03_n42,0.010721455
FI,S01_n11,0.010511946
FI,S02_n44,0.010059182
FI,S03_n80,0.0100146495
FI,S01_n2,0.009809092
FI,S03_n84,0.009350989
FI,S05_n8,0.008450193
FI,S03_n50,0.0077954214
FI,S02_n79,0.007586008
FI,S02_n41,0.0055750543
FI,S02_n69,0.00554158
FI,S03_n105,0.0046445164
FI,S04_n3,0.003876396
FI,S03_n62,0.0038127222
FI,S03_n49,0.003665494
nF:60
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.004044037928323745
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.003042139120742569
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.003670852206667928
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.02595755353340288
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.0039750988842577354
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.005385101164565211
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.01914370282519827
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.006728593180653357
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.008592228566218435
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.082905833153015
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.004089910269973938
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.003974779594575771
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.007572118184859305
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.006678431990520087
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0024478173699385014
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.00891930619083975
train， grp_6, 999
psiTrainTest:0.0006205597209639586
psiTrainOot:0.0033355039558337755
psiTestOot:0.003105536209798487
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6302742051317208 - ks:0.2002147053937477
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.6371211161176997 - ks:0.1872889467932416
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6218829997284093 - ks:0.1815459413973185
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6167771222156857 - ks:0.16627161593834539
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6434592461938877 - ks:0.21708714000445983
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6169340221784114 - ks:0.1778539020891413
6 - (20240112, 20240112)
999
999
train eff, auc:0.6371390661527769 - ks:0.2503001200480192
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6129338667285096 - ks:0.17064007421150276
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6230621521548769 - ks:0.1900066576367685
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.5895915809576859 - ks:0.14673045934708656
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.5900296508031315 - ks:0.1675803664753941
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6308459260439219 - ks:0.19169400014964033
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5766589518830136 - ks:0.13033460885748632
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5303111587982832 - ks:0.15021459227467815
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.5978840069462616 - ks:0.14837028490774007
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6152515931002155 - ks:0.18258319020618152
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5759710783416785 - ks:0.11395223687265443
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5861936752674639 - ks:0.13726872246696037
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.5994604575752117 - ks:0.16891551071878946



>>>>>
y2
learning_rate=0.03
n_estimators=100
max_depth=3
min_child_weight=120
参数寻优后训练：
train ROC: 0.6399104062547505
ks2_train:0.20088474887096514
test accuracy: 0.9265586377368855
test ROC: 0.6092801131368225
ks2_test:0.15550757174942248
oot accuracy: 0.935927204267336
oot ROC: 0.6023578277127092
ks2_oot:0.14460458046308466
(0.0949, 0.2369, 0.1097, 3187, 0.1167, 1.8217, 1.8217, 0.0577)
(0.085, 0.0949, 0.0896, 3187, 0.0813, 1.2684, 1.5451, 0.0073)
(0.0791, 0.085, 0.0818, 3187, 0.0709, 1.1068, 1.399, 0.0012)
(0.0753, 0.0791, 0.0771, 3187, 0.0715, 1.1166, 1.3284, 0.0015)
(0.0718, 0.0753, 0.0735, 3187, 0.0634, 0.9892, 1.2605, 0.0)
(0.0682, 0.0718, 0.07, 3187, 0.059, 0.9207, 1.2039, 0.0007)
(0.0643, 0.0682, 0.0663, 3187, 0.0562, 0.8766, 1.1571, 0.0018)
(0.0595, 0.0643, 0.062, 3187, 0.0464, 0.7248, 1.1031, 0.01)
(0.0523, 0.0595, 0.0562, 3187, 0.0442, 0.6905, 1.0572, 0.0129)
(0.0323, 0.0523, 0.0469, 3187, 0.0311, 0.4848, 1.0, 0.0418)
FI,S02_n50,0.037553586
FI,S01_n23,0.02752331
FI,S02_n62,0.026064849
FI,S03_n23,0.025051739
FI,S03_n43,0.024342854
FI,S03_n92,0.024215644
FI,S03_n102,0.021438438
FI,S02_n48,0.020928077
FI,S02_n55,0.019046122
FI,S03_n83,0.018905269
FI,S02_n82,0.018796192
FI,S03_n75,0.018415255
FI,S02_n88,0.018379554
FI,S03_n87,0.018225105
FI,S02_n83,0.017674342
FI,S03_n100,0.017580606
FI,S05_n2,0.015874932
FI,S02_n53,0.015728237
FI,S05_n1,0.015706653
FI,S02_n40,0.015597722
FI,S03_n48,0.015513739
FI,S01_n17,0.015174345
FI,S03_n27,0.014913564
FI,S03_n69,0.014724505
FI,S03_n74,0.014706159
FI,S01_n22,0.014651424
FI,S03_n65,0.014458667
FI,S03_n55,0.01437949
FI,S03_n53,0.014116734
FI,S01_n19,0.01407267
FI,S02_n74,0.014043556
FI,S05_n12,0.0137545625
FI,S04_n14,0.013609809
FI,S03_n70,0.013586025
FI,S02_n64,0.013584006
FI,S03_n51,0.01298081
FI,S02_n65,0.012763298
FI,S01_n1,0.012761786
FI,S03_n29,0.0126619255
FI,S03_n94,0.012378701
FI,S03_n26,0.012250049
FI,S02_n85,0.012111055
FI,S03_n73,0.011793803
FI,S03_n86,0.011332951
FI,S02_n44,0.011263785
FI,S01_n8,0.011151404
FI,S01_n6,0.010975398
FI,S03_n104,0.010813277
FI,S03_n106,0.010736035
FI,S05_n7,0.010673788
FI,S02_n84,0.009979846
FI,S03_n80,0.009896809
FI,S03_n20,0.009890695
FI,S04_n27,0.009818625
FI,S03_n42,0.009810511
FI,S03_n4,0.009599228
FI,S01_n21,0.009581157
FI,S03_n82,0.009564274
FI,S05_n4,0.009544959
FI,S02_n59,0.008505476
FI,S03_n84,0.00831947
FI,S01_n11,0.007939052
FI,S03_n15,0.007204186
FI,S01_n9,0.0071273535
FI,S03_n105,0.007001661
FI,S02_n79,0.006804562
FI,S02_n69,0.0066457195
FI,S05_n8,0.0065364856
FI,S03_n50,0.0064755515
FI,S03_n85,0.0052367896
FI,S01_n2,0.005156883
FI,S01_n4,0.004268799
FI,S02_n41,0.0036553752
FI,S03_n62,0.0034362052
FI,S03_n49,0.0033703086
FI,S04_n4,0.002947105
FI,S03_n31,0.0026970343
nF:77
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.0033572452359792123
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.0041383086267470425
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.0038333145333779442
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.020248013102795438
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.006677276255969851
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.006203697248654693
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.019188402439331197
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.00328527872977286
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0072185271398280525
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.07493702205524856
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.0033391184652506742
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.0026869225149711577
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.004945262210472518
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.0032987885928751084
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0032321563348864605
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.00977005075759296
train， grp_6, 999
psiTrainTest:0.000526595492516324
psiTrainOot:0.005073201966669088
psiTestOot:0.0036324413278259913
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6415801310468056 - ks:0.2144137107444361
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.6479736349181895 - ks:0.20887875609809337
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6342824039690772 - ks:0.2007869293410539
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6287743527355597 - ks:0.18320948730982828
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6560329275305945 - ks:0.24179438470949144
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6274500980010537 - ks:0.1942333684367623
6 - (20240112, 20240112)
999
999
train eff, auc:0.6534719150818221 - ks:0.26715423011309786
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.61643475572047 - ks:0.1749226963512678
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6315133333114691 - ks:0.19129800959617993
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.5928312809596754 - ks:0.14688563073189176
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.6020446255252886 - ks:0.1856246933595
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6316580864872431 - ks:0.19066099964520777
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5766537324739701 - ks:0.11895182336326793
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5351394849785407 - ks:0.160675965665236
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.609419727462315 - ks:0.1560564700938062
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6218126684270251 - ks:0.18535554353026062
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5841664130950132 - ks:0.12127936250232929
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5916129641283826 - ks:0.1422385147891756
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6109709962168979 - ks:0.17323004863988473


'''
