
import pandas as pd
import numpy as np

import math
from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

import pandas as pd
import numpy as np

import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns

from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

from sklearn.linear_model import LogisticRegression


from sklearn.metrics import classification_report,roc_auc_score,accuracy_score 
from xgboost.sklearn import XGBClassifier


# More standard KS implementation (built on sklearn's ROC curve):
from sklearn import metrics
def ks_score_V2(y_true, y_pred):
    """Kolmogorov-Smirnov statistic of a score: max |TPR - FPR| over the ROC curve.

    y_true : array-like of 0/1 labels.
    y_pred : array-like of predicted scores/probabilities.

    NOTE: sklearn's roc_curve returns (fpr, tpr, thresholds) in that order;
    the original code unpacked them as (tpr, fpr, threshold). KS is symmetric
    in the two rates, so the value was still correct — only the names were
    wrong. Fixed here.
    """
    fpr, tpr, thresholds = metrics.roc_curve(np.array(y_true), y_pred)
    ks = abs(tpr - fpr).max()
    return ks


# Automatic "optimal" binning: shrink the quantile-bin count until the
# bucket-level event rate is (near-)monotone in the feature.
def mono_bin(Y, X, n ):
    """Monotonic quantile binning of feature X against binary target Y.

    Y : pandas Series of 0/1 labels (1 = target/positive class).
    X : pandas Series of feature values, aligned with Y.
    n : starting number of quantile bins; decremented until the Spearman
        correlation of per-bucket means satisfies |r| >= 0.95.

    Returns (d4, iv, cut, woe):
      d4  -- per-bucket table (min/max/sum/total/rate/woe/attributes/bktIV)
      iv  -- total information value of the binning
      cut -- bin edge list bracketed by -inf / +inf
      woe -- per-bucket WOE rounded to 3 decimals
    """
    r = 0  # initial Spearman correlation (forces at least one loop pass)
#    good=Y.sum()   # count of good customers
#    bad=Y.count()-good   # count of bad customers
    good=Y.sum()   # count of target (label-1) samples
    bad=Y.count()-good  # count of non-target (label-0) samples

#    seXNorm,seYNorm,seXAbnorm,seYAbnorm = split_abnorm_ptns(X,Y)

    rowN = n  # NOTE(review): assigned but never used afterwards

  # Core of the binning: the loop lets the data choose the optimal number
  # of split points instead of fixing them by hand.
    while np.abs(r) < 0.95: #1:   # exit once bucket means are (near-)monotone
#        X = seXNorm
#        Y = seYNorm

        d1 = pd.DataFrame({"X": X, "Y": Y, "Bucket": pd.qcut(X, n, duplicates="drop")}) # note pd.qcut: split X into n equal-frequency buckets; n is driven by the Spearman criterion
        d2 = d1.groupby('Bucket', as_index = True,  observed=True)
#        print("d2.mean().X:")
#        print(str(d2.mean().X))
#        print("d2.mean().Y:")
#        print(str(d2.mean().Y))

        r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)    # Spearman correlation of the bucket means is the stopping criterion
#        print("r:" + str(r))
#        print("n:" + str(n))
        n = n - 1
#    print("d2:" + str(d2))
#    print("res n:" + str(n))
#    print("res r:" + str(r))
    if str(r) == 'nan':   # correlation undefined (e.g. a single bucket): fall back to one split
      n = 1

    d3 = pd.DataFrame(d2.X.min(), columns = ['min'])
    d3['min']=d2.min().X    # the 'min' values are the split points
    d3['max'] = d2.max().X  # right edge of each bucket
    d3['sum'] = d2.sum().Y  # label-1 count per bucket
    d3['total'] = d2.count().Y  # sample count per bucket
    d3['rate'] = d2.mean().Y    # label-1 rate per bucket
    d3['woe']=np.log((d3['rate']/(1-d3['rate']))/(good/bad))  # WOE vs. overall odds
    d3['goodattribute']=d3['sum']/good   # bucket share of all label-1 samples
    d3['badattribute']=(d3['total']-d3['sum'])/bad  # bucket share of all label-0 samples

    rawIvs = (d3['goodattribute']-d3['badattribute'])*d3['woe']
#    print("rawIvs:" + str(rawIvs))
    d3['bktIV'] = rawIvs  # per-bucket IV contribution

    iv=((d3['goodattribute']-d3['badattribute'])*d3['woe']).sum()   # total IV

#    dAbn = calc_abnorm_infos(seXAbnorm,seYAbnorm, good,bad)  # TODO
#    d3 = pd.concat([d3,dAbn],ignore_index=True)

    d4 = (d3.sort_values(by = 'min')).reset_index(drop=True)    # buckets ordered by left edge
    woe=list(d4['woe'].round(3))             # per-bucket WOE values
    cut=[]    # cut collects the bin edges
    cut.append(float('-inf'))  # open lower bound
    for i in range(1,n+1):            # n split points -> n+1 bins
         qua=X.quantile(i/(n+1))     # quantile value used as the bin edge
         cut.append(round(qua,4))   # keep 4 decimals
    cut.append(float('inf')) # open upper bound
#    print("d4:" + str(d4))
#    print("iv:" + str(iv))
#    print("cut:" + str(cut))
#    print("woe:" + str(woe))
    return d4,iv,cut,woe


def bin_frequency(x, y, n=10):
    """Equal-frequency binning of feature x against binary target y.

    x : pandas Series of feature values.
    y : pandas Series of 0/1 targets (1 = "bad").
    n : number of quantile bins (ignored when x takes exactly two values).

    Returns (IV, cut, WOEi, d4):
      IV   -- total information value of the variable
      cut  -- bin edges: -inf, each bin's left edge, +inf
      WOEi -- per-bin WOE values rounded to 3 decimals
      d4   -- per-bin statistics table sorted ascending by left edge
    """
    total = y.count()        # total sample count
    bad = y.sum()            # count of bad samples (y == 1)
    good = total - bad       # count of good samples
    if x.value_counts().shape[0] == 2:   # binary feature: just two buckets
        d1 = pd.DataFrame({'x': x, 'y': y, 'bucket': pd.cut(x, 2)})
    else:                                # otherwise equal-frequency buckets
        d1 = pd.DataFrame({'x': x, 'y': y, 'bucket': pd.qcut(x, n, duplicates='drop')})
    # observed=True: only materialize buckets that actually occur. This matches
    # mono_bin above and avoids the pandas categorical-groupby FutureWarning.
    d2 = d1.groupby('bucket', as_index=True, observed=True)
    d3 = pd.DataFrame(d2.x.min(), columns=['min_bin'])
    d3['min_bin'] = d2.x.min()               # left edge of each bucket
    d3['max_bin'] = d2.x.max()               # right edge of each bucket
    d3['bad'] = d2.y.sum()                   # bad count per bucket
    d3['total'] = d2.y.count()               # sample count per bucket
    d3['bad_rate'] = d3['bad']/d3['total']   # bad rate within the bucket
    d3['badattr'] = d3['bad']/bad            # bucket share of all bad samples
    d3['goodattr'] = (d3['total'] - d3['bad'])/good    # bucket share of all good samples
    d3['WOEi'] = np.log(d3['badattr']/d3['goodattr'])  # WOE per bucket (+-inf if a bucket is pure)
    IV = ((d3['badattr']-d3['goodattr'])*d3['WOEi']).sum()  # variable-level IV
    d3['IVi'] = (d3['badattr']-d3['goodattr'])*d3['WOEi']   # per-bucket IV contribution
    d4 = (d3.sort_values(by='min_bin')).reset_index(drop=True)  # sort buckets ascending by left edge
    cut = [float('-inf')]
    for i in d4.min_bin:
        cut.append(i)
    cut.append(float('inf'))
    WOEi = list(d4['WOEi'].round(3))
    return IV, cut, WOEi, d4


def cal_psi(actual, predict, bins=10):
    """Population Stability Index between two score distributions.

    Bin edges are equal-width over the observed range of *actual*, with
    open-ended outer bins (-inf / +inf); *predict* is counted against the
    same edges. One is added to each bin's count before computing rates so
    an empty bin cannot produce a zero/negative log argument.

    :param actual: Array or Series of baseline scores (e.g. training scores)
    :param predict: Array or Series of comparison scores (e.g. test scores)
    :param bins: number of interior equal-width segments
    :return: (psi, psi_df) -- the float PSI value and the per-bin table with
             columns actual / predict / actual_rate / predict_rate / psi
    """
    lo = actual.min()
    hi = actual.max()
    width = (hi - lo) / bins

    # Interior edges plus open-ended outer bins.
    edges = [-float("inf")]
    for i in range(1, bins):
        edges.append(lo + i * width)
    edges.append(float("inf"))

    act_counts = np.histogram(actual, bins=edges)[0]
    pred_counts = np.histogram(predict, bins=edges)[0]

    psi_df = pd.merge(
        pd.DataFrame(act_counts, columns=['actual']),
        pd.DataFrame(pred_counts, columns=['predict']),
        right_index=True, left_index=True)

    # +1 in the numerator guards against empty bins (log of 0).
    psi_df['actual_rate'] = (psi_df['actual'] + 1) / psi_df['actual'].sum()
    psi_df['predict_rate'] = (psi_df['predict'] + 1) / psi_df['predict'].sum()
    psi_df['psi'] = (psi_df['actual_rate'] - psi_df['predict_rate']) * np.log(
        psi_df['actual_rate'] / psi_df['predict_rate'])

    return psi_df['psi'].sum(), psi_df


################

def stat_group_labs_rate(vSL): #, numAllY0,numAllY1):
  """Summary statistics for one score bin.

  vSL : list of ((score, index), label) pairs, label in {0, 1}.

  Returns (minScore, maxScore, avgScore, rateLab1, numLab1, numLabs).

  Bug fix: min/max used to be tracked against the sentinels 100000 / -100,
  which silently clipped bins whose scores fell outside that range; they
  are now computed from the actual scores.
  """
  vScores = []
  numLab1 = 0
  for kv in vSL:
    (score, sIdx) = kv[0]
    lab = kv[1]
    vScores.append(score)
    if lab > 0:
      numLab1 += 1
  numLabs = len(vSL)
  numLab0 = numLabs - numLab1  # kept for symmetry; not returned

  minScore = min(vScores)
  maxScore = max(vScores)
  avgScore = sum(vScores) / len(vScores)
  rateLab1 = numLab1 / numLabs

  return minScore, maxScore, avgScore, rateLab1, numLab1, numLabs

def calc_cur_group_iv(numAllY0, numAllY1, numCurY0, numCurY1):
  """Information-value contribution of one score bin.

  numAllY0 / numAllY1 : overall counts of label-0 / label-1 samples.
  numCurY0 / numCurY1 : counts of label-0 / label-1 samples in this bin.

  Returns IVi = (badattr - goodattr) * ln(badattr / goodattr), where
  badattr/goodattr are the bin's shares of all positives/negatives.

  NOTE: a bin containing no label-1 samples yields +inf (numpy emits a
  divide-by-zero RuntimeWarning for log(0)), matching the original
  behaviour. Dead locals (allTotal/curTotal/bad_rate) were removed.
  """
  badattr = numCurY1 / numAllY1    # bin's share of all positives
  goodattr = numCurY0 / numAllY0   # bin's share of all negatives
  WOEi = np.log(badattr / goodattr)
  IVi = (badattr - goodattr) * WOEi
  return IVi


def calc_score_labs_bins_by_us(scores, labs, numGroups=10):  # equal-frequency binning (same sample count per bin)
  """Split (score, label) pairs into numGroups equal-count bins by descending
  score and report per-bin lift/IV statistics.

  scores    : sequence of model scores (positionally indexable).
  labs      : sequence of 0/1 labels aligned with scores.
  numGroups : number of bins; the last bin absorbs the remainder.

  Returns a list of per-bin tuples:
    (min, max, avg score, bin size, label-1 rate, lift, cumulative lift, bin IV)
  """
  numItems = len(scores)
  numInGrp = int(numItems / numGroups)  # base bin size; remainder goes to the last bin

  # Key each label by (score, position) so duplicate scores stay distinct.
  dScoreLab = {}
  numAllLab1 = 0
  numAllLabs = numItems
  sIdx = 0
  for l in labs:
    dScoreLab[(scores[sIdx], sIdx)] = l
    if l == 1:
      numAllLab1 += 1
    sIdx += 1

  numAllLab0 = numAllLabs - numAllLab1
  rateAllLab1 = numAllLab1 / numAllLabs  # overall positive rate (lift denominator)

  # Sort descending by (score, label) -- same ordering as before.
  sSls = sorted(dScoreLab.items(), key=lambda x: (x[0][0], x[1]), reverse=True)

  # The first numGroups-1 bins take numInGrp items each; the last one takes
  # the remainder. This replaces the duplicated tail-bin code of the
  # original version with a single loop -- behaviour is unchanged.
  grpSizes = [numInGrp] * (numGroups - 1)
  grpSizes.append(numItems - numInGrp * (numGroups - 1))

  vRes = []
  sIdx = 0
  sumLabs = 0
  sumLab1 = 0
  for grpSize in grpSizes:
    vCurRng = sSls[sIdx : sIdx + grpSize]
    minScore, maxScore, avgScore, rateLab1, numLab1, numLabs = stat_group_labs_rate(vCurRng)
    sumLabs += numLabs
    sumLab1 += numLab1
    curLift = rateLab1 / rateAllLab1
    cumRateLab1 = sumLab1 / sumLabs      # cumulative positive rate down to this bin
    cumLift = cumRateLab1 / rateAllLab1

    curIv = calc_cur_group_iv(numAllLab0, numAllLab1, numLabs - numLab1, numLab1)

    vRes.append((round(minScore, 4), round(maxScore, 4), round(avgScore, 4), len(vCurRng),
                 round(rateLab1, 4), round(curLift, 4), round(cumLift, 4), round(curIv, 4)))
    sIdx += grpSize

  return vRes
  
  

def calc_score_labs_bins_by_us_v2(scores, labs, numGroups=10): # equal-width binning (each bin spans the same score range)
  """Split scores into numGroups equal-width score intervals (edges derived
  from the observed min/max) and report per-bin label statistics.

  scores    : sequence of model scores (positionally indexable).
  labs      : sequence of 0/1 labels aligned with scores.
  numGroups : number of equal-width intervals.

  Returns a list of per-bin tuples:
    (min, max, avg score, bin size, label-1 rate, lift, cumulative lift, bin IV)
  """
  vRes = []

#  print("len(scores):" + str(len(scores)))
#  print("len(labs):" + str(len(labs)))
  numItems = len(scores)
#  numInGrp = int(numItems / numGroups)
#  print("numInGrp:" + str(numInGrp))

  # Key each label by (score, position) so duplicate scores stay distinct.
  dScoreLab = {}
  numAllLab1 = 0
  numAllLabs = numItems
  sIdx = 0
  for l in labs:
    s = scores[sIdx]
    dScoreLab[(s, sIdx)] = l
    if l == 1:
      numAllLab1 += 1
    sIdx += 1

  numAllLab0 = numAllLabs - numAllLab1
#  print("numAllLab1:" + str(numAllLab1))
#  print("numAllLabs:" + str(numAllLabs))
#  print("numAllLab0:" + str(numAllLab0))

  rateAllLab1 = numAllLab1 / numAllLabs  # overall positive rate (lift denominator)
#  print("rateAllLab1:" + str(rateAllLab1))

  # Sort descending by (score, label); bins are consumed high-to-low score.
  sSls = sorted(dScoreLab.items(), key=lambda x: (x[0][0],x[1]),reverse=True)
#  print("sSls:" +str(sSls))
  # sSls example: [((0.14229898, 26025), 0), ((0.13870734, 18148), 0), ...]
  vsScores = []
  for kv in sSls:
    vsScores.append(kv[0][0])

  maxScore = sSls[0][0][0]
  minScore = sSls[-1][0][0]
  print("maxScore:" + str(maxScore))
  print("minScore:" + str(minScore))
  lenBin = (maxScore-minScore) / numGroups  # width of one bin
  print("lenBin:" + str(lenBin))
  cuts = [minScore + i * lenBin for i in range(1, numGroups)] # interior bin edges
  cuts.insert(0, -float("inf"))
  cuts.append(float("inf"))
  print("cuts:" + str(cuts))
  actual_cuts = np.histogram(vsScores, bins=cuts) # bin counts, ascending edge order
#  predict_cuts = np.histogram(predict, bins=cuts)  # (unused here)
  print("actual_cuts:" + str(actual_cuts))
  print(type(actual_cuts))
  vGroupsLen = actual_cuts[0].tolist()
  # Histogram counts run low-to-high, but sSls is sorted high-to-low:
  # reverse the counts so each bin size lines up with the sorted pairs.
  vGroupsLen = vGroupsLen[::-1]

  # Walk the descending-sorted pairs, consuming each bin's count in turn.
  sIdx = 0
  sumLabs = 0
  sumLab1 = 0
  for gIdx in range(len(vGroupsLen)): #numGroups-1):
    cGrpNum = vGroupsLen[gIdx]
    begIdx = sIdx
    endIdx = sIdx + cGrpNum
#    print("cur rng:" + str(begIdx) + "," + str(endIdx))

    vCurRng = sSls[begIdx : endIdx]
    minScore, maxScore, avgScore, rateLab1, numLab1, numLabs = stat_group_labs_rate(vCurRng)
    sumLabs += numLabs
    sumLab1 += numLab1
    curLift = rateLab1/rateAllLab1
    cumRateLab1 = sumLab1 / sumLabs
    cumLift = cumRateLab1 / rateAllLab1

    curIv = calc_cur_group_iv(numAllLab0, numAllLab1, numLabs-numLab1, numLab1)

    vRes.append((round(minScore,4), round(maxScore,4), round(avgScore,4), len(vCurRng), round(rateLab1,4), round(curLift,4), round(cumLift,4), round(curIv,4)))

    sIdx += cGrpNum

#  print("last rng:" + str(sIdx) + "," + str(numItems))

  return vRes
  


def calc_date_ranges_effects(serDt, vPreds, vLabs, nDaysInGroup, tag, fOut):
  """Bucket samples by application date (nDaysInGroup distinct dates per
  bucket) and print AUC and KS for each bucket.

  serDt        : iterable of sortable date keys, positionally aligned with
                 vPreds / vLabs.
  vPreds       : predicted probabilities (list).
  vLabs        : 0/1 labels (list).
  nDaysInGroup : number of distinct dates per bucket.
  tag          : prefix for the printed report lines.
  fOut         : unused here; kept for signature symmetry with
                 calc_oot_preds_psis.
  """
  print("in calc_date_ranges_effects ...")
  # Collect (pred, label) pairs per distinct date.
  dDtFreq = {}
  pIdx = 0
  for dt in serDt:
    cP = vPreds[pIdx]
    cL = vLabs[pIdx]
    if dt not in dDtFreq:
      dDtFreq[dt] = []
    dDtFreq[dt].append((cP,cL)) #= dDtFreq.get(dt, 0) + 1
    pIdx += 1
  sDtFreqs = sorted(dDtFreq.items(), key=lambda x: x[0],reverse=False)  # chronological order
  print(len(sDtFreqs))

  # Assign consecutive dates to buckets of nDaysInGroup days each.
  dIdx = 0
  dGrpPreds = {}  # group index -> ([preds], [labels])
  dGrpDts = {}    # group index -> [dates in the group]
  for kv in sDtFreqs:
#    print(tag+" dt f:" + str(kv[0]) + ", " + str(len(kv[1])))
    cGrpIdx = math.floor(dIdx/nDaysInGroup)
    if cGrpIdx not in dGrpPreds:
      dGrpPreds[cGrpIdx] = ([],[])
      dGrpDts[cGrpIdx] = []

    for pp in kv[1]: # [0]:
      dGrpPreds[cGrpIdx][0].append(pp[0])
      dGrpPreds[cGrpIdx][1].append(pp[1])
#      dGrpPreds[cGrpIdx][0].append(pp) #kv[1][0])
#    for ll in kv[1][1]:
#      dGrpPreds[cGrpIdx][1].append(ll) #kv[1][1])
    dGrpDts[cGrpIdx].append(kv[0])
    dIdx += 1

  # Report AUC / KS per date bucket.
  vGrpDtRng = []  # NOTE(review): never populated or used below
  sGrpFreqs = sorted(dGrpPreds.items(), key=lambda x: x[0],reverse=False)
  for kv in sGrpFreqs:
    cGIdx = kv[0]
    dtRng = (dGrpDts[cGIdx][0], dGrpDts[cGIdx][-1])
    print(str(cGIdx) + " - " + str(dtRng))
    vCPreds = kv[1][0]
    vCLabs = kv[1][1]
    print(len(vCPreds))
    print(len(vCLabs))

    cAUC = roc_auc_score(pd.Series(vCLabs), pd.Series(vCPreds))
    cKS = ks_score_V2(pd.Series(vCLabs), pd.Series(vCPreds))
    print(tag + ", auc:" + str(cAUC) + " - ks:" + str(cKS))
  

def calc_oot_preds_psis(serOotDt, serPreds, nDaysInGroup, tag,fOut):
  """Bucket predictions by application date (nDaysInGroup distinct dates per
  bucket) and report the PSI between each bucket and the next one.

  serOotDt     : iterable of sortable date keys, positionally aligned with
                 serPreds.
  serPreds     : predicted probabilities (positionally indexable).
  nDaysInGroup : number of distinct dates per bucket.
  tag          : prefix for report lines.
  fOut         : optional open file handle; when not None, report lines are
                 also written there.
  """
  # Collect predictions per distinct date.
  dDtFreq = {}
  pIdx = 0
  for dt in serOotDt:
    cP = serPreds[pIdx]
    if dt not in dDtFreq:
      dDtFreq[dt] = []
    dDtFreq[dt].append(cP) #= dDtFreq.get(dt, 0) + 1
    pIdx += 1
  sDtFreqs = sorted(dDtFreq.items(), key=lambda x: x[0],reverse=False)  # chronological order

  # Assign consecutive dates to buckets of nDaysInGroup days each.
  dIdx = 0
  dGrpPreds = {}  # group index -> list of per-date prediction lists
  dGrpDts = {}    # group index -> list of dates in the group
  for kv in sDtFreqs:
#    print(tag+" dt f:" + str(kv[0]) + ", " + str(len(kv[1])))
    cGrpIdx = math.floor(dIdx/nDaysInGroup)
    if cGrpIdx not in dGrpPreds:
      dGrpPreds[cGrpIdx] = []
      dGrpDts[cGrpIdx] = []
    dGrpPreds[cGrpIdx].append(kv[1])
    dGrpDts[cGrpIdx].append(kv[0])
    dIdx += 1

  # Flatten each bucket's per-date lists and record its (first, last) dates.
  vGrpPreds = []
  vGrpDtRng = []
  sGrpFreqs = sorted(dGrpPreds.items(), key=lambda x: x[0],reverse=False)
  #for gIdx in dGrpPreds:
  for kv in sGrpFreqs:
    gIdx = kv[0]
    cGDtsPreds = dGrpPreds[gIdx]
    cFInfo = ""
    cGPreds = []
    for cv in cGDtsPreds:
      cFInfo = cFInfo + str(len(cv)) + ";"
      for cp in cv:
        cGPreds.append(cp)
#    print(tag + ", " +str(gIdx) + ", " + cFInfo)
    vGrpPreds.append(cGPreds)
    vGrpDtRng.append((dGrpDts[gIdx][0], dGrpDts[gIdx][-1]))

  # PSI of each bucket's score distribution against the next bucket's.
  for gIdx in range(len(vGrpPreds)):
    print(tag + "， grp_" + str(gIdx) + ", " + str(len(vGrpPreds[gIdx])))
    if fOut is not None:
      fOut.write(tag + "， grp_" + str(gIdx) + ", " + str(len(vGrpPreds[gIdx])) + "\n")
    if gIdx < len(vGrpPreds)-1:
      cGPs = vGrpPreds[gIdx]
      nGPs = vGrpPreds[gIdx+1]
      cGRng = vGrpDtRng[gIdx]
      nGRng = vGrpDtRng[gIdx+1]
      psi,psidf = cal_psi(pd.Series(cGPs), pd.Series(nGPs))
      print(tag + ", psi of " + str(cGRng)+" vs " + str(nGRng) + " : " + str(psi))
      if fOut is not None:
        fOut.write(tag + ", psi of " + str(cGRng)+" vs " + str(nGRng) + " : " + str(psi) + "\n")




# ---------------------------------------------------------------------------
# Training driver: load the labelled Excel sample, fit an XGBoost classifier
# on a fixed feature list, report AUC/KS/lift/PSI diagnostics, then dump the
# predicted probabilities for the full sample and a blind-test sample.
# NOTE(review): all file paths are machine-specific (local Excel/txt files).
# ---------------------------------------------------------------------------
rawPtnsPath = "../恒昌12万_青禾苗.xlsx"

pdData = pd.read_excel(rawPtnsPath)
# Derive a yyyymm month key from the application date.
pdData['mn'] = pdData['apply_date'].apply(lambda x:str(x)[:6])


featPath = "D:/yrProj/20240612_众安15w/青禾苗-幸福消金-20240603_report.xlsx/银融_青禾苗_三版模型/银融_青禾苗_入模特征_子分1.1.txt"

# Feature list: one feature name per non-empty line.
vFeats = []
for f in open(featPath):
  f = f.strip()
  if len(f) > 0:
    vFeats.append(f)




print("vFeats:")
print(vFeats)
print(len(vFeats))


# Target column to model (alternatives kept for quick switching).
colLab = 'y1'
#colLab = 'y2'
#colLab = 'y3'

# Output paths for the predicted probabilities of the two scored datasets.
ptnProxPath = "./预估分/w12_" + colLab +"_prox"
testProxPath = "./预估分/k49_" + colLab +"_prox"


# Train / valid / oot splits come pre-assigned in the 'flag' column.
# For y3, rows with y3 < 0 are unlabelled and are filtered out.
pdPtn_train = pdData[pdData['flag'] == 'train']
if colLab == 'y3':
  pdPtn_train = pdPtn_train[pdPtn_train['y3']>=0]
  
pdPtn_valid = pdData[pdData['flag'] == 'valid']
if colLab == 'y3':
  pdPtn_valid = pdPtn_valid[pdPtn_valid['y3']>=0]

pdPtn_oot = pdData[pdData['flag'] == 'oot']
if colLab == 'y3':
  pdPtn_oot = pdPtn_oot[pdPtn_oot['y3']>=0]


print("pdPtn_train.y3.value_counts():")
print(pdPtn_train.y3.value_counts())
print("pdPtn_valid.y3.value_counts():")
print(pdPtn_valid.y3.value_counts())
print("pdPtn_oot.y3.value_counts():")
print(pdPtn_oot.y3.value_counts())


# Tuned hyper-parameters (a previous candidate kept commented for reference).
dParams = {'learning_rate':0.03, 'n_estimators':100, 'max_depth':3, 'min_child_weight':120}
#dParams = {'learning_rate':0.02, 'n_estimators':100, 'max_depth':3, 'min_child_weight':60}

# NOTE(review): `if True:` looks like a collapsed hyper-parameter sweep loop
# kept only for its indentation -- confirm before refactoring.
if True:
  learning_rate = dParams['learning_rate']
  n_estimators = dParams['n_estimators']
  max_depth = dParams['max_depth']
  min_child_weight = dParams['min_child_weight']
  
  print(">>>>>")
  print(colLab)
#  print(vFeats)
  print("learning_rate="+str(learning_rate))
  print("n_estimators="+str(n_estimators))
  print("max_depth="+str(max_depth))
  print("min_child_weight="+str(min_child_weight))

  train_x = pdPtn_train[vFeats]
  train_y = pdPtn_train[colLab]
  test_x = pdPtn_valid[vFeats]
  test_y = pdPtn_valid[colLab]


  # Build the classifier with the tuned parameters.
  xgb1 = XGBClassifier(
   learning_rate =learning_rate,
   n_estimators=n_estimators,
   max_depth=max_depth,
   min_child_weight=min_child_weight, #gamma=2.0, reg_lambda=2.0, reg_alpha=2.0, # subsample=0.8, colsample_bytree=0.8, 
#   eta=0.01,
   objective= 'binary:logistic',
   nthread=4,
#   scale_pos_weight=1,
   seed=27)


  print("参数寻优后训练：")
  xgb1.fit(train_x,train_y) 
  prex=xgb1.predict(test_x) 
  prox=xgb1.predict_proba(test_x)[:,1]
  prox_train=xgb1.predict_proba(train_x)[:,1]

  # Train-set AUC / KS.
  train_prox=xgb1.predict_proba(train_x)[:,1]
  print('train ROC: {}'.format(roc_auc_score(train_y,train_prox)))
  ks2_train = ks_score_V2(train_y,prox_train)
  print("ks2_train:" + str(ks2_train))


  # Validation-set accuracy / AUC / KS.
  print('test accuracy: {}'.format(accuracy_score(test_y,prex))) 
  print('test ROC: {}'.format(roc_auc_score(test_y,prox)))
  ks2_test = ks_score_V2(test_y,prox)
  print("ks2_test:" + str(ks2_test))


  # Out-of-time (oot) accuracy / AUC / KS.
  #prex_oot = xgb1.predict(pdPtn_oot.drop(toDrop,axis=1)) 
  #prox_oot = xgb1.predict_proba(pdPtn_oot.drop(toDrop,axis=1))[:,1]
  prex_oot = xgb1.predict(pdPtn_oot[vFeats]) 
  prox_oot = xgb1.predict_proba(pdPtn_oot[vFeats])[:,1]
  print('oot accuracy: {}'.format(accuracy_score(pdPtn_oot[colLab], prex_oot))) 
  print('oot ROC: {}'.format(roc_auc_score(pdPtn_oot[colLab], prox_oot)))
  ks2_oot = ks_score_V2(pdPtn_oot[colLab],prox_oot)
  print("ks2_oot:" + str(ks2_oot))


  # Equal-frequency lift table on the OOT set.
  vSLStats = calc_score_labs_bins_by_us(prox_oot, pdPtn_oot[colLab], numGroups=10)
  for sl in vSLStats:
    print(sl)


  # Map feature importances back to column names.
  fi = xgb1.feature_importances_
  tc = list(train_x.columns)

  dFws = {}
  for fIdx in range(len(fi)):
    cc = tc[fIdx]
    cw = fi[fIdx]
    dFws[cc] = cw

  sFMWs = sorted(dFws.items(), key=lambda x: x[1],reverse=True)

  # Log the features with nonzero importance.
  nF = 0
  for kv in sFMWs:
    ft = kv[0]
    wgt = kv[1]
    if wgt > 0.0:
      print("FI," + ft + "," + str(wgt))
      nF += 1
  print("nF:" + str(nF))
  
  # Score-stability (PSI) across consecutive date buckets within each split.
  calc_oot_preds_psis(pdPtn_oot['apply_date'], prox_oot, 4, "oot", None)

  calc_oot_preds_psis(pdPtn_valid['apply_date'], prox, 10, "test", None)
  calc_oot_preds_psis(pdPtn_train['apply_date'], prox_train, 10, "train", None)
#  calc_oot_preds_psis(pdData['apply_date'], prox_train, 10, None)

  # Cross-split PSI of the score distributions.
  psiTrainTest,_1 = cal_psi(pd.Series(prox_train), pd.Series(prox))
  psiTrainOot,_2 = cal_psi(pd.Series(prox_train), pd.Series(prox_oot))
  psiTestOot,_3 = cal_psi(pd.Series(prox), pd.Series(prox_oot))
  print("psiTrainTest:" + str(psiTrainTest))
  print("psiTrainOot:" + str(psiTrainOot))
  print("psiTestOot:" + str(psiTestOot))
  
  # Per-date-bucket AUC/KS for each split.
  calc_date_ranges_effects(pdPtn_train['apply_date'], prox_train.tolist(), pdPtn_train[colLab].tolist(), 10, "train eff", None)
  calc_date_ranges_effects(pdPtn_valid['apply_date'], prox.tolist(), pdPtn_valid[colLab].tolist(), 10, "valid eff", None)
  calc_date_ranges_effects(pdPtn_oot['apply_date'], prox_oot.tolist(), pdPtn_oot[colLab].tolist(), 4, "oot eff", None)



# pdData
# ptnProxPath

# D:/yrProj/20240615_恒昌/恒昌样本/盲测49k_青禾苗特征.xlsx
# testProxPath

# Score the full sample and write one probability per line.
prox_ptn = xgb1.predict_proba(pdData[vFeats])[:,1]
fOutPtn = open(ptnProxPath, 'w')
for pp in prox_ptn:
  fOutPtn.write(str(pp) + "\n")
fOutPtn.close()

# Score the blind-test sample and write one probability per line.
pdTest = pd.read_excel("D:/yrProj/20240615_恒昌/恒昌样本/盲测49k_青禾苗特征.xlsx") #, encoding='utf-8')
prox_test = xgb1.predict_proba(pdTest[vFeats])[:,1]
fOutTest = open(testProxPath, 'w')
for pp in prox_test:
  fOutTest.write(str(pp) + "\n")
fOutTest.close()



'''
>>>>>
y1    <<<<<<<<<<<<<<<<
learning_rate=0.03
n_estimators=100
max_depth=3
min_child_weight=120
参数寻优后训练：
train ROC: 0.6767875594433378
ks2_train:0.26429550093672227
test accuracy: 0.9811590222466355
test ROC: 0.6252680495395049
ks2_test:0.1908194440499939
oot accuracy: 0.9840916222152495
oot ROC: 0.614781950439597
ks2_oot:0.1636312364706185
(0.0304, 0.0647, 0.0352, 3187, 0.0317, 1.9921, 1.9921, 0.0711)
(0.0266, 0.0304, 0.0283, 3187, 0.021, 1.3215, 1.6568, 0.0093)
(0.0244, 0.0266, 0.0254, 3187, 0.0169, 1.0651, 1.4596, 0.0004)
(0.0228, 0.0244, 0.0235, 3187, 0.0176, 1.1045, 1.3708, 0.0011)
(0.0217, 0.0228, 0.0222, 3187, 0.0169, 1.0651, 1.3097, 0.0004)
(0.0208, 0.0217, 0.0212, 3187, 0.0129, 0.8087, 1.2262, 0.0042)
(0.0198, 0.0208, 0.0203, 3187, 0.016, 1.0059, 1.1947, 0.0)
(0.0184, 0.0198, 0.0191, 3187, 0.0107, 0.6706, 1.1292, 0.0136)
(0.0167, 0.0184, 0.0176, 3187, 0.01, 0.6312, 1.0739, 0.0175)
(0.0129, 0.0167, 0.0156, 3187, 0.0053, 0.3353, 1.0, 0.0745)
FI,S02_n88,0.03268833
FI,S02_n62,0.031768955
FI,S02_n53,0.03154678
FI,S02_n83,0.029596698
FI,S03_n97,0.029482609
FI,S03_n61,0.028254839
FI,S03_n86,0.02755401
FI,S03_n53,0.027493251
FI,S01_n1,0.026515448
FI,S02_n74,0.026398845
FI,S03_n84,0.025420766
FI,S03_n38,0.02535232
FI,S02_n48,0.025166422
FI,S02_n84,0.024855874
FI,S03_n29,0.024616404
FI,S03_n23,0.024026267
FI,S03_n75,0.023899654
FI,S03_n56,0.022917597
FI,S03_n10,0.02144004
FI,S03_n55,0.020910636
FI,S03_n69,0.020696046
FI,S03_n51,0.020605247
FI,S03_n16,0.020088596
FI,S03_n9,0.018669384
FI,S02_n45,0.018481392
FI,S01_n23,0.017192775
FI,S02_n79,0.017124457
FI,S02_n69,0.01681405
FI,S05_n2,0.016745925
FI,S04_n2,0.016324187
FI,S03_n92,0.016231285
FI,S03_n28,0.0161811
FI,S03_n3,0.016176708
FI,S01_n11,0.015971392
FI,S02_n44,0.014301745
FI,S01_n12,0.014118982
FI,S03_n96,0.013994751
FI,S03_n19,0.013689583
FI,S01_n21,0.013491596
FI,S03_n82,0.013491368
FI,S05_n3,0.013134869
FI,S02_n64,0.013108385
FI,S03_n80,0.012369093
FI,S03_n15,0.011837087
FI,S01_n3,0.011171589
FI,S01_n6,0.010780178
FI,S03_n17,0.010612183
FI,S03_n45,0.009604165
FI,S04_n11,0.009172991
FI,S04_n1,0.008399356
FI,S03_n27,0.008111343
FI,S03_n72,0.0057301563
FI,S03_n59,0.0056411303
FI,S03_n48,0.005212119
FI,S03_n20,0.0048190723
nF:55
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.0048172397897582285
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.002236890368222426
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.004769881221044998
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.005864089666154093
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.0065203419293133865
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.006532572060674551
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.016819990289649984
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.004021403928167781
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.005040005817774362
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.07558037426369521
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.002422873497081973
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.0023899527398501775
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.0017237273906856802
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.00395370724545793
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0005672236191238637
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.006977134728773124
train， grp_6, 999
psiTrainTest:0.00045239480323736134
psiTrainOot:0.0030208503835471067
psiTestOot:0.004360995148542886
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6752440953109614 - ks:0.2622078648899468
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.7012615088219084 - ks:0.32140117935334783
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6441146228949367 - ks:0.2400466865516795
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6613915434147275 - ks:0.24609759095378558
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6744287746366969 - ks:0.2728922649101674
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6805680612010492 - ks:0.265296384413735
6 - (20240112, 20240112)
999
999
train eff, auc:0.7777374909354605 - ks:0.4287164612037708
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6442583534945733 - ks:0.2902319728553559
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6381121055780314 - ks:0.22498119332081534
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.6213885233709022 - ks:0.22755878482750735
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.6003838709152719 - ks:0.2589908749329039
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6423585209381943 - ks:0.2594325918131778
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5995353051204115 - ks:0.18823357493570264
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5479591836734694 - ks:0.2683673469387755
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.6335556807415563 - ks:0.21475942949325272
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6343125849415602 - ks:0.2153846153846154
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5783547013430231 - ks:0.15065018168521604
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5950413598763749 - ks:0.14785474047813837
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6386062751903269 - ks:0.22584527541424093




>>>>>
y1
learning_rate=0.03
n_estimators=80
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.665692661875868
ks2_train:0.2428962244335935
test accuracy: 0.9811590222466355
test ROC: 0.6192744961125676
ks2_test:0.1935897925560166
oot accuracy: 0.9840916222152495
oot ROC: 0.6119013214292071
ks2_oot:0.16681649962414413
(0.0342, 0.0677, 0.0393, 3187, 0.0292, 1.8343, 1.8343, 0.0526)
(0.0309, 0.0342, 0.0324, 3187, 0.0248, 1.5582, 1.6963, 0.0257)
(0.0287, 0.0309, 0.0298, 3187, 0.0129, 0.8087, 1.4004, 0.0042)
(0.0272, 0.0287, 0.0279, 3187, 0.0207, 1.3018, 1.3757, 0.0082)
(0.0262, 0.0272, 0.0266, 3187, 0.0147, 0.927, 1.286, 0.0006)
(0.0254, 0.0262, 0.0258, 3187, 0.0151, 0.9467, 1.2295, 0.0003)
(0.0248, 0.0254, 0.0251, 3187, 0.0129, 0.8087, 1.1693, 0.0042)
(0.0235, 0.0248, 0.0242, 3187, 0.0122, 0.7692, 1.1193, 0.0062)
(0.0212, 0.0235, 0.0223, 3187, 0.0107, 0.6706, 1.0695, 0.0136)
(0.0186, 0.0212, 0.0202, 3187, 0.006, 0.3748, 1.0, 0.063)
FI,S02_n62,0.041951276
FI,S02_n88,0.041274928
FI,S02_n53,0.0405874
FI,S02_n83,0.039904382
FI,S02_n50,0.038892057
FI,S02_n48,0.037780687
FI,S02_n57,0.037053213
FI,S03_n61,0.03695487
FI,S03_n97,0.034744497
FI,S03_n53,0.034728236
FI,S03_n66,0.033510953
FI,S03_n38,0.031806763
FI,S03_n23,0.03150187
FI,S03_n75,0.029318908
FI,S03_n56,0.028354531
FI,S03_n86,0.027855098
FI,S02_n74,0.026641618
FI,S03_n51,0.025406098
FI,S03_n55,0.024232963
FI,S05_n2,0.023607988
FI,S02_n79,0.023136206
FI,S03_n92,0.022171102
FI,S03_n28,0.021416344
FI,S03_n9,0.021254262
FI,S01_n23,0.020705659
FI,S03_n21,0.020565005
FI,S01_n11,0.020409701
FI,S03_n69,0.020134116
FI,S02_n64,0.019665882
FI,S03_n74,0.018222284
FI,S03_n96,0.017730061
FI,S03_n15,0.016008746
FI,S03_n80,0.015187668
FI,S05_n3,0.011766601
FI,S01_n3,0.011700142
FI,S01_n22,0.01109614
FI,S04_n1,0.009957478
FI,S03_n68,0.009773889
FI,S03_n29,0.008547112
FI,S03_n59,0.007816631
FI,S03_n20,0.00662666
nF:41
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.0039285633442458535
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.0019742766304921464
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.005599417713910421
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.013021615832838333
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.020131184904316535
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.009644670051723177
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.008373251156875471
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.010672010299172353
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.002872873567448588
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.053402838636827446
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.007039109458900859
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.0030477074189579623
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.00409671698374039
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.00816385695397613
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.001018754442912037
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.00648959919486496
train， grp_6, 999
psiTrainTest:0.0003962174990202567
psiTrainOot:0.003138105708112297
psiTestOot:0.004454812036452153
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6619468117512876 - ks:0.2377792949794204
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.6883557948350167 - ks:0.29422907079440336
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6323867440894138 - ks:0.2198158474905979
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6564857918851743 - ks:0.24423344804981972
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6638380385473 - ks:0.26091831765640616
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6699620632677739 - ks:0.2647652997083097
6 - (20240112, 20240112)
999
999
train eff, auc:0.754713560551124 - ks:0.3934010152284264
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6520856584298951 - ks:0.28289902402121236
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6274515320779751 - ks:0.2266634285612742
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.6064992276446022 - ks:0.17327936380799813
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.5960693895476503 - ks:0.2669285447063225
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6260846008276338 - ks:0.25827830170159644
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5986526771101239 - ks:0.18818096796820205
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5785714285714286 - ks:0.3826530612244898
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.6326192244239514 - ks:0.2146442014697975
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6266556129382985 - ks:0.20258222343027998
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5842326180192121 - ks:0.17195267804770276
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5858018589219163 - ks:0.1643055176802109
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6427346898790864 - ks:0.26296602104791755




>>>>>
y1
learning_rate=0.01
n_estimators=100
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.6365994519908952
ks2_train:0.1976058795338042
test accuracy: 0.9811590222466355
test ROC: 0.6136725096488042
ks2_test:0.17262863684751212
oot accuracy: 0.9840916222152495
oot ROC: 0.5932871627712928
ks2_oot:0.12387811590448705
D:\yrProj\20240615_恒昌\百度云_青禾苗-西安普惠-0617\青禾苗-西安普惠262子分-0617\fit_v2\fit_y1_tune_parms.py:226: RuntimeWarning: divide by zero encountered in log
  WOEi = np.log(badattr / goodattr)
(0.0627, 0.0828, 0.0667, 3187, 0.0292, 1.8343, 1.8343, 0.0526)
(0.0601, 0.0627, 0.0612, 3187, 0.0185, 1.1637, 1.499, 0.0026)
(0.0587, 0.0601, 0.0593, 3187, 0.0195, 1.2229, 1.407, 0.0046)
(0.0573, 0.0587, 0.0578, 3187, 0.0176, 1.1045, 1.3314, 0.0011)
(0.057, 0.0573, 0.0571, 3187, 0.0493, 3.0966, 1.6844, 0.2482)
(0.057, 0.057, 0.057, 3187, 0.0, 0.0, 1.4037, inf)
(0.057, 0.057, 0.057, 3187, 0.0, 0.0, 1.2032, inf)
(0.0558, 0.057, 0.0565, 3187, 0.0091, 0.572, 1.1243, 0.0246)
(0.0524, 0.0558, 0.0534, 3187, 0.016, 1.0059, 1.1111, 0.0)
(0.0524, 0.0524, 0.0524, 3187, 0.0, 0.0, 1.0, inf)
FI,S02_n48,0.115949616
FI,S02_n62,0.11120159
FI,S02_n53,0.10613872
FI,S02_n50,0.10522537
FI,S02_n57,0.100530736
FI,S03_n97,0.0817624
FI,S03_n38,0.07373965
FI,S03_n23,0.07361467
FI,S03_n51,0.06119123
FI,S01_n11,0.053429738
FI,S03_n92,0.052181818
FI,S01_n23,0.041638076
FI,S01_n3,0.023396363
nF:13
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.00203368638532269
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.00203871985439642
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.010490865363825552
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.020856202523132302
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.024400923825733164
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.015079482403638904
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.00937421603973142
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.024792176718030193
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.009467998174964076
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.09706649564104143
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.020722300446200676
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.01078221936372632
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.00870618407444164
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.01798518249808843
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.004385614187444888
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.009363223993816474
train， grp_6, 999
psiTrainTest:0.00013105201509920129
psiTrainOot:0.006556594049671185
psiTestOot:0.006499113975809225
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6373991385600635 - ks:0.1902849800579488
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.6626713664827247 - ks:0.2534240094029524
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6022641257255075 - ks:0.17716805305963645
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6250987978668197 - ks:0.19274418223533263
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6389957689383261 - ks:0.21242396476749448
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6345594323672122 - ks:0.18299930641371573
6 - (20240112, 20240112)
999
999
train eff, auc:0.6791515591007977 - ks:0.26860043509789705
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6518422255012846 - ks:0.23516369928168557
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6176039402214152 - ks:0.20006160298063658
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.6031308999370673 - ks:0.16308856341895983
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.6130995949836529 - ks:0.22648384001041005
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6182558440844594 - ks:0.21204387872886743
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5775543605330838 - ks:0.15413841477671264
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5673469387755102 - ks:0.34183673469387754
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.6264645847584418 - ks:0.2004327452436433
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6034857298178852 - ks:0.1350095134547432
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5558996274881732 - ks:0.1180093089867526
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5741608717389328 - ks:0.1466684846832106
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6127896887595164 - ks:0.16843931930138828


y1
learning_rate=0.02
n_estimators=100
max_depth=4
min_child_weight=100
参数寻优后训练：
train ROC: 0.6836739152911708
ks2_train:0.26980346714058767
test accuracy: 0.9811590222466355
test ROC: 0.613377324633006
ks2_test:0.17927809350142476
oot accuracy: 0.9840916222152495
oot ROC: 0.6124736738934262
ks2_oot:0.1615984135881418
(0.0395, 0.068, 0.0456, 3187, 0.0317, 1.9921, 1.9921, 0.0711)
(0.0357, 0.0395, 0.0374, 3187, 0.021, 1.3215, 1.6568, 0.0093)
(0.0336, 0.0357, 0.0345, 3187, 0.0188, 1.1834, 1.499, 0.0032)
(0.0323, 0.0336, 0.0329, 3187, 0.0141, 0.8876, 1.3462, 0.0014)
(0.0312, 0.0323, 0.0317, 3187, 0.0182, 1.144, 1.3057, 0.002)
(0.0303, 0.0312, 0.0307, 3187, 0.0132, 0.8284, 1.2262, 0.0033)
(0.0293, 0.0303, 0.0298, 3187, 0.0141, 0.8876, 1.1778, 0.0014)
(0.028, 0.0293, 0.0287, 3187, 0.0116, 0.7298, 1.1218, 0.0088)
(0.0259, 0.028, 0.0269, 3187, 0.0088, 0.5523, 1.0585, 0.0273)
(0.0227, 0.0259, 0.0248, 3187, 0.0075, 0.4734, 1.0, 0.0405)
FI,S02_n62,0.04179109
FI,S02_n53,0.04070353
FI,S02_n50,0.038365766
FI,S02_n57,0.037246075
FI,S02_n48,0.03421224
FI,S02_n88,0.033252615
FI,S02_n83,0.030691061
FI,S03_n23,0.029984923
FI,S03_n38,0.029281355
FI,S03_n97,0.025754888
FI,S03_n51,0.024590842
FI,S03_n9,0.024120038
FI,S03_n55,0.022625046
FI,S03_n75,0.019256989
FI,S01_n11,0.019130992
FI,S03_n92,0.018163525
FI,S05_n2,0.01696177
FI,S03_n86,0.016786557
FI,S01_n22,0.016478874
FI,S03_n3,0.016428882
FI,S03_n80,0.016091218
FI,S05_n1,0.015893525
FI,S03_n48,0.01564795
FI,S01_n2,0.015570117
FI,S01_n23,0.015420982
FI,S03_n28,0.015293035
FI,S01_n8,0.015214214
FI,S03_n61,0.015055679
FI,S01_n12,0.014872957
FI,S03_n66,0.014742123
FI,S03_n41,0.014634594
FI,S03_n1,0.014290388
FI,S02_n36,0.01427364
FI,S03_n15,0.014179921
FI,S01_n6,0.01362225
FI,S03_n104,0.013605538
FI,S03_n69,0.013583948
FI,S01_n13,0.013416515
FI,S03_n100,0.012765205
FI,S02_n64,0.0126803
FI,S01_n3,0.0121674845
FI,S03_n102,0.011550767
FI,S02_n74,0.011318633
FI,S03_n87,0.011301058
FI,S02_n44,0.010551648
FI,S01_n21,0.010203655
FI,S03_n56,0.009741406
FI,S03_n59,0.009286
FI,S03_n96,0.009220784
FI,S03_n16,0.009147229
FI,S03_n84,0.008756666
FI,S02_n1,0.008682701
FI,S03_n27,0.0073188827
FI,S04_n2,0.007164841
FI,S03_n29,0.0065798177
FI,S03_n71,0.005391373
FI,S03_n49,0.0047136615
FI,S03_n20,0.004468035
FI,S02_n69,0.004381759
FI,S04_n27,0.0033904738
FI,S04_n1,0.0028293685
FI,S03_n105,0.0027820931
FI,S03_n83,0.002370455
nF:63
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.004966293288213861
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.0042869835468614895
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.004986725378900236
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.009734107526023465
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.007257696819032103
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.004889091563070277
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.008375040770472814
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.009170216098369502
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.005802057925742286
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.042608891181241976
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.0045231291307695504
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.00207892286795046
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.0028804760713903007
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.004988706929308249
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0014498238865866256
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.014423657761143389
train， grp_6, 999
psiTrainTest:0.0003760177320745669
psiTrainOot:0.0024613183368321877
psiTestOot:0.0025696299785524872
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6789276631316459 - ks:0.253749318144787
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.7052569127637109 - ks:0.32092970363434736
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6546779845383881 - ks:0.261213109287289
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6687004516961652 - ks:0.2555207309078991
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6865306041810835 - ks:0.2925973507274607
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6887730983068074 - ks:0.29515568020575655
6 - (20240112, 20240112)
999
999
train eff, auc:0.7616388687454678 - ks:0.5048585931834662
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6443969230077824 - ks:0.25158231403596815
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6280942170201927 - ks:0.22186550410785255
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.6104003375479146 - ks:0.20378740202528745
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.5924380682834789 - ks:0.20263016639828235
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6179849392623536 - ks:0.178137588240375
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5775689735796119 - ks:0.17855389291559504
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5632653061224491 - ks:0.27142857142857146
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.630043055042415 - ks:0.20084244488259545
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6191742321282958 - ks:0.1813481924435988
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5849890684157202 - ks:0.15383900480684995
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5922950186346696 - ks:0.1643191528042905
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6507116267353336 - ks:0.2640855911330049



>>>>>
y1
learning_rate=0.02
n_estimators=120
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.6657236583563586
ks2_train:0.24387150810016067
test accuracy: 0.9811590222466355
test ROC: 0.6187449258699593
ks2_test:0.18825002048422423
oot accuracy: 0.9840916222152495
oot ROC: 0.6118172388839196
ks2_oot:0.15900034469441343
(0.0343, 0.0685, 0.0394, 3187, 0.0289, 1.8146, 1.8146, 0.0504)
(0.031, 0.0343, 0.0325, 3187, 0.0235, 1.4793, 1.6469, 0.0194)
(0.0287, 0.031, 0.0298, 3187, 0.0154, 0.9665, 1.4201, 0.0001)
(0.0272, 0.0287, 0.0279, 3187, 0.0185, 1.1637, 1.356, 0.0026)
(0.0261, 0.0272, 0.0266, 3187, 0.0151, 0.9467, 1.2742, 0.0003)
(0.0255, 0.0261, 0.0258, 3187, 0.0176, 1.1045, 1.2459, 0.0011)
(0.0248, 0.0255, 0.0252, 3187, 0.0151, 0.9467, 1.2032, 0.0003)
(0.0236, 0.0248, 0.0242, 3187, 0.0085, 0.5325, 1.1193, 0.0303)
(0.0213, 0.0236, 0.0224, 3187, 0.0104, 0.6509, 1.0673, 0.0154)
(0.0188, 0.0213, 0.0203, 3187, 0.0063, 0.3945, 1.0, 0.0578)
FI,S02_n88,0.037869338
FI,S02_n62,0.03753792
FI,S02_n53,0.036363326
FI,S02_n83,0.036001835
FI,S02_n50,0.034691013
FI,S03_n61,0.033569463
FI,S02_n57,0.033233777
FI,S03_n53,0.03274117
FI,S02_n48,0.032196954
FI,S03_n97,0.03095847
FI,S03_n66,0.030227033
FI,S03_n86,0.03007878
FI,S02_n84,0.02958411
FI,S03_n38,0.02851836
FI,S03_n23,0.027534131
FI,S03_n56,0.026916381
FI,S03_n75,0.026569143
FI,S03_n102,0.026429566
FI,S03_n51,0.02302109
FI,S03_n55,0.022507537
FI,S02_n74,0.022340925
FI,S03_n62,0.021491585
FI,S03_n9,0.020766456
FI,S05_n2,0.02060583
FI,S02_n79,0.020463172
FI,S03_n92,0.019310193
FI,S03_n21,0.018827304
FI,S03_n25,0.01874292
FI,S01_n23,0.018736297
FI,S01_n11,0.018565008
FI,S03_n80,0.017639091
FI,S02_n64,0.016153391
FI,S03_n3,0.01590187
FI,S03_n96,0.01577532
FI,S03_n15,0.014451553
FI,S05_n3,0.014417276
FI,S03_n69,0.011595852
FI,S03_n84,0.010556982
FI,S01_n3,0.010242518
FI,S03_n45,0.010105583
FI,S04_n1,0.009266533
FI,S03_n68,0.0088496655
FI,S03_n72,0.0075623686
FI,S03_n29,0.007515938
FI,S03_n59,0.0072326493
FI,S03_n20,0.0063343025
nF:46
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.002691087846916169
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.0019516443716356543
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.004151379969960769
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.01370568108473651
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.015281161302950852
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.00833101713778649
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.010425114696451574
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.010506739239973351
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.004091777193855814
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.058667471234275535
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.005138177699170378
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.0038210914978061877
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.002988715252349483
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.00772650528962406
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0011544731962611948
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.005611242448517876
train， grp_6, 999
psiTrainTest:0.00033920262650457677
psiTrainOot:0.0023671851033614045
psiTestOot:0.004301931827213088
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6604538144928769 - ks:0.22143026657882248
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.6889961883512296 - ks:0.2956891514101108
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6329481604598785 - ks:0.22271523056116493
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6576995332814377 - ks:0.24791052114060969
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6622751514297187 - ks:0.2538850586496484
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6715780306729117 - ks:0.2663729573736199
6 - (20240112, 20240112)
999
999
train eff, auc:0.7419506889050036 - ks:0.3903553299492386
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6491345023107402 - ks:0.2751466215254631
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6255116305242769 - ks:0.22158414434052237
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.6087912638022771 - ks:0.1697680073230734
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.5956302151954326 - ks:0.24893052912376584
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.627611875839216 - ks:0.2540301999984296
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5994505494505495 - ks:0.17124152443301377
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5683673469387754 - ks:0.3826530612244898
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.6310261512742756 - ks:0.21428571428571436
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6255074748572982 - ks:0.18448491437890735
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5850896237554373 - ks:0.16022883957614398
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5869409599127352 - ks:0.16501908917371147
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6426489727944469 - ks:0.2612516793551276



>>>>>
y1
learning_rate=0.02
n_estimators=130
max_depth=3
min_child_weight=110
参数寻优后训练：
train ROC: 0.6702184135567768
ks2_train:0.252109137090497
test accuracy: 0.9811590222466355
test ROC: 0.6222676411607879
ks2_test:0.19193489574917255
oot accuracy: 0.9840916222152495
oot ROC: 0.6139390810953824
ks2_oot:0.16522553460493555
(0.0326, 0.065, 0.0375, 3187, 0.0301, 1.8935, 1.8935, 0.0593)
(0.0292, 0.0326, 0.0307, 3187, 0.022, 1.3807, 1.6371, 0.0127)
(0.027, 0.0292, 0.028, 3187, 0.0157, 0.9862, 1.4201, 0.0)
(0.0255, 0.027, 0.0262, 3187, 0.0185, 1.1637, 1.356, 0.0026)
(0.0244, 0.0255, 0.0249, 3187, 0.0179, 1.1243, 1.3097, 0.0015)
(0.0237, 0.0244, 0.0241, 3187, 0.0141, 0.8876, 1.2393, 0.0014)
(0.0229, 0.0237, 0.0233, 3187, 0.0132, 0.8284, 1.1806, 0.0033)
(0.0216, 0.0229, 0.0223, 3187, 0.0113, 0.7101, 1.1218, 0.0102)
(0.0194, 0.0216, 0.0205, 3187, 0.0097, 0.6114, 1.0651, 0.0197)
(0.0162, 0.0194, 0.0183, 3187, 0.0066, 0.4142, 1.0, 0.053)
FI,S02_n88,0.03430222
FI,S02_n62,0.03383402
FI,S02_n53,0.03319937
FI,S02_n83,0.03230929
FI,S02_n50,0.03122198
FI,S03_n97,0.030779516
FI,S03_n61,0.03034685
FI,S03_n53,0.029270627
FI,S02_n48,0.028878635
FI,S03_n86,0.028583007
FI,S02_n84,0.02728689
FI,S02_n74,0.027032038
FI,S03_n38,0.026386386
FI,S03_n66,0.026304552
FI,S03_n23,0.02586551
FI,S03_n75,0.024916483
FI,S03_n102,0.024520988
FI,S03_n56,0.024137458
FI,S03_n10,0.02323588
FI,S03_n55,0.021488767
FI,S03_n51,0.02082811
FI,S03_n9,0.020732025
FI,S03_n62,0.019861436
FI,S02_n79,0.01849732
FI,S03_n84,0.018168343
FI,S03_n28,0.017892512
FI,S01_n23,0.017465444
FI,S05_n2,0.017376663
FI,S03_n25,0.017373616
FI,S01_n11,0.01702032
FI,S04_n2,0.016931271
FI,S03_n92,0.016899167
FI,S03_n21,0.016231967
FI,S02_n64,0.016184818
FI,S03_n80,0.016082365
FI,S03_n96,0.014665046
FI,S03_n82,0.013809047
FI,S05_n3,0.0134262545
FI,S03_n15,0.012560468
FI,S01_n5,0.012514702
FI,S03_n69,0.01181165
FI,S03_n17,0.010355483
FI,S03_n27,0.010225542
FI,S01_n3,0.010078012
FI,S03_n29,0.009696093
FI,S03_n105,0.008324735
FI,S04_n1,0.008309085
FI,S03_n4,0.008220428
FI,S03_n68,0.00707197
FI,S03_n72,0.0062206974
FI,S03_n59,0.0060939216
FI,S03_n20,0.005171091
nF:52
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.003305722331956226
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.0021884074745352547
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.005021643223821461
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.009664610771485818
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.013646713499789062
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.008713169502201844
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.008407657848258006
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.00576088610611603
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.004746459572143607
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.07846161972645652
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.0049529908705464
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.0036662358322164133
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.0030550259762400734
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.005283550871835912
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0006748484841935833
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.006809647444259064
train， grp_6, 999
psiTrainTest:0.00044928644757668526
psiTrainOot:0.0031594707395666906
psiTestOot:0.00464665155623873
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6656009358241416 - ks:0.23686587465198816
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.6928380306592027 - ks:0.30622347949080625
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6382754956476171 - ks:0.22464197713841083
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6593556245220147 - ks:0.25045067191084897
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6679449556176368 - ks:0.25128523036966793
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6760948359214172 - ks:0.27647760100396435
6 - (20240112, 20240112)
999
999
train eff, auc:0.758158085569253 - ks:0.4086294416243655
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6478049840083291 - ks:0.29817163145003633
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6314631300237527 - ks:0.22710768082547994
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.614748412380571 - ks:0.19728674409291147
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.5974479090421119 - ks:0.2698401080043592
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6312396447613291 - ks:0.2621259354068677
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.6040039747486556 - ks:0.1749941547813888
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5653061224489796 - ks:0.35
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.6329685665268079 - ks:0.2051955035464625
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.631351997825496 - ks:0.21037782005979883
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5850073512047596 - ks:0.16174174036916
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5888112444323244 - ks:0.15889919098263797
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6407509516345723 - ks:0.2455007277205553



>>>>>
y1
learning_rate=0.02
n_estimators=130
max_depth=3
min_child_weight=130
参数寻优后训练：
train ROC: 0.66997392068846
ks2_train:0.25445370653962873
test accuracy: 0.9811590222466355
test ROC: 0.6214941372681324
ks2_test:0.19684294851392264
oot accuracy: 0.9840916222152495
oot ROC: 0.6125524423212291
ks2_oot:0.1646968270819501
(0.0326, 0.0615, 0.0374, 3187, 0.0314, 1.9724, 1.9724, 0.0687)
(0.0293, 0.0326, 0.0309, 3187, 0.0195, 1.2229, 1.5976, 0.0046)
(0.027, 0.0293, 0.0281, 3187, 0.0169, 1.0651, 1.4201, 0.0004)
(0.0255, 0.027, 0.0262, 3187, 0.0188, 1.1834, 1.3609, 0.0032)
(0.0245, 0.0255, 0.025, 3187, 0.0179, 1.1243, 1.3136, 0.0015)
(0.0237, 0.0245, 0.0241, 3187, 0.0138, 0.8679, 1.2393, 0.0019)
(0.0228, 0.0237, 0.0233, 3187, 0.0141, 0.8876, 1.1891, 0.0014)
(0.0215, 0.0228, 0.0222, 3187, 0.0107, 0.6706, 1.1243, 0.0136)
(0.0194, 0.0215, 0.0205, 3187, 0.0097, 0.6114, 1.0673, 0.0197)
(0.0163, 0.0194, 0.0184, 3187, 0.0063, 0.3945, 1.0, 0.0578)
FI,S02_n88,0.036963988
FI,S02_n53,0.035685603
FI,S02_n62,0.035462797
FI,S02_n83,0.03464075
FI,S03_n97,0.03296837
FI,S03_n61,0.032918595
FI,S02_n48,0.031798717
FI,S03_n86,0.029804226
FI,S03_n38,0.029369771
FI,S02_n84,0.029287623
FI,S03_n84,0.029086104
FI,S03_n53,0.029015273
FI,S03_n75,0.027532486
FI,S03_n102,0.027145475
FI,S03_n23,0.026706481
FI,S03_n56,0.026081316
FI,S02_n74,0.025229825
FI,S03_n29,0.023918487
FI,S03_n16,0.023293342
FI,S03_n51,0.022989938
FI,S03_n55,0.022905575
FI,S03_n62,0.02179884
FI,S02_n79,0.01975686
FI,S03_n9,0.01960503
FI,S05_n2,0.019261071
FI,S03_n25,0.019087126
FI,S01_n23,0.019083811
FI,S03_n92,0.01839279
FI,S02_n69,0.018096853
FI,S03_n28,0.018058952
FI,S01_n11,0.017793866
FI,S03_n69,0.017345548
FI,S03_n3,0.017306358
FI,S01_n12,0.015955321
FI,S03_n96,0.015540498
FI,S02_n64,0.015507327
FI,S03_n82,0.015192563
FI,S05_n3,0.014532407
FI,S01_n3,0.013213953
FI,S03_n15,0.012332336
FI,S01_n6,0.011924443
FI,S03_n17,0.011472411
FI,S03_n27,0.010586852
FI,S04_n1,0.008649685
FI,S03_n59,0.0062049064
FI,S03_n48,0.0055557988
FI,S03_n20,0.0049395706
nF:47
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.005743840827765361
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.0016898588291310618
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.007322706878506447
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.007439786671071217
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.011436926507368772
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.005031023113507008
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.010465243192783002
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.0032880322591616073
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.006248796223943983
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.05375433347826372
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.0037415669588787047
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.0027522188603589146
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.0034484951120265757
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.005995048953972169
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0009878373575630745
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.017063548498294067
train， grp_6, 999
psiTrainTest:0.000796339051955404
psiTrainOot:0.0038277578195492347
psiTestOot:0.004202646910959687
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6679311981524381 - ks:0.2640510877804461
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.6936502471927273 - ks:0.3059005850283218
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6390403716927051 - ks:0.24274227911888402
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6568289170900251 - ks:0.24047443461160273
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6688446702719327 - ks:0.2529527727230011
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6720527938083944 - ks:0.26516601382888433
6 - (20240112, 20240112)
999
999
train eff, auc:0.7605511240029007 - ks:0.42335025380710656
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6436741144659082 - ks:0.2878500752769518
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6317592981998897 - ks:0.23092232693412623
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.617626866525545 - ks:0.22355397906058705
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.5972811854454367 - ks:0.27399599863367985
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6348242259581786 - ks:0.26597356911213893
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5970598550385784 - ks:0.17917933130699087
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5683673469387754 - ks:0.35816326530612247
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.6282625077275936 - ks:0.1949786188000922
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6359945637401467 - ks:0.20595814079913022
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5796870595942745 - ks:0.15828172254344064
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5871744614125989 - ks:0.14767066630306336
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6378925492610837 - ks:0.2472640506045678


>>>>>
y1
learning_rate=0.02
n_estimators=80
max_depth=3
min_child_weight=80
参数寻优后训练：
train ROC: 0.6471408347850741
ks2_train:0.2147585298304276
test accuracy: 0.9811590222466355
test ROC: 0.6140447349341387
ks2_test:0.17844256566295602
oot accuracy: 0.9840916222152495
oot ROC: 0.6022265774926309
ks2_oot:0.1420593783765478
D:\yrProj\20240615_恒昌\百度云_青禾苗-西安普惠-0617\青禾苗-西安普惠262子分-0617\fit_v2\fit_y1_tune_parms.py:226: RuntimeWarning: divide by zero encountered in log
  WOEi = np.log(badattr / goodattr)
(0.0455, 0.079, 0.0503, 3187, 0.0307, 1.9329, 1.9329, 0.0639)
(0.0427, 0.0455, 0.0439, 3187, 0.0201, 1.2623, 1.5976, 0.0063)
(0.0405, 0.0427, 0.0415, 3187, 0.0166, 1.0454, 1.4135, 0.0002)
(0.0395, 0.0405, 0.0399, 3187, 0.0201, 1.2623, 1.3757, 0.0063)
(0.0382, 0.0395, 0.0389, 3187, 0.0323, 2.0316, 1.5069, 0.0761)
(0.0382, 0.0382, 0.0382, 3187, 0.0, 0.0, 1.2558, inf)
(0.0381, 0.0382, 0.0381, 3187, 0.0138, 0.8679, 1.2003, 0.0019)
(0.0369, 0.0381, 0.0378, 3187, 0.0091, 0.572, 1.1218, 0.0246)
(0.0334, 0.0369, 0.0348, 3187, 0.0116, 0.7298, 1.0782, 0.0088)
(0.0329, 0.0334, 0.0331, 3187, 0.0047, 0.2959, 1.0, 0.088)
FI,S02_n88,0.08296681
FI,S02_n48,0.08135106
FI,S02_n62,0.08068461
FI,S02_n50,0.07633245
FI,S02_n53,0.07504844
FI,S02_n57,0.07033382
FI,S03_n97,0.06107854
FI,S03_n23,0.05490932
FI,S03_n38,0.052441645
FI,S03_n75,0.050976135
FI,S03_n51,0.04451191
FI,S01_n11,0.038430423
FI,S05_n2,0.03828391
FI,S03_n92,0.036092695
FI,S01_n23,0.031791843
FI,S03_n80,0.030123757
FI,S03_n15,0.026186034
FI,S02_n74,0.020682564
FI,S02_n85,0.020289661
FI,S01_n3,0.014787743
FI,S03_n59,0.012696641
nF:21
oot， grp_0, 5677
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.004299938948901902
oot， grp_1, 7205
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.0024387554125908277
oot， grp_2, 7004
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.002500587502479941
oot， grp_3, 7454
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.003114458123807334
oot， grp_4, 4530
test， grp_0, 2572
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.008954581353804763
test， grp_1, 4353
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.008449115691431796
test， grp_2, 2553
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.005256302263960086
test， grp_3, 2719
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.005237399411375273
test， grp_4, 2648
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0072564123100352694
test， grp_5, 3111
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.05924359609347886
test， grp_6, 249
train， grp_0, 10351
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.004417353118722623
train， grp_1, 17252
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.0019052393305020066
train， grp_2, 9968
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.0013080888786993854
train， grp_3, 11064
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.002674578915857901
train， grp_4, 10817
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0013865778612411432
train， grp_5, 12365
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.018753917635351617
train， grp_6, 999
psiTrainTest:0.00021366252699877913
psiTrainOot:0.0016655254193719397
psiTestOot:0.0022490030494587026
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
10351
10351
train eff, auc:0.6453040100170729 - ks:0.20284218505373375
1 - (20231123, 20231202)
17252
17252
train eff, auc:0.6704240500760338 - ks:0.2580557603044007
2 - (20231203, 20231212)
9968
9968
train eff, auc:0.6124834254801648 - ks:0.18490282898271487
3 - (20231213, 20231222)
11064
11064
train eff, auc:0.6415013776220911 - ks:0.21035111438872506
4 - (20231223, 20240101)
10817
10817
train eff, auc:0.6452558606932022 - ks:0.23364601754790643
5 - (20240102, 20240111)
12365
12365
train eff, auc:0.6497045795160851 - ks:0.23321635681801223
6 - (20240112, 20240112)
999
999
train eff, auc:0.705221174764322 - ks:0.3145757795503988
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2572
2572
valid eff, auc:0.6581265401814138 - ks:0.2554847311376932
1 - (20231123, 20231202)
4353
4353
valid eff, auc:0.6155618606469498 - ks:0.20867121186094312
2 - (20231203, 20231212)
2553
2553
valid eff, auc:0.5987577950683677 - ks:0.17561073287945533
3 - (20231213, 20231222)
2719
2719
valid eff, auc:0.6116763447681322 - ks:0.2343808454919566
4 - (20231223, 20240101)
2648
2648
valid eff, auc:0.6222291148086785 - ks:0.22320201647415405
5 - (20240102, 20240111)
3111
3111
valid eff, auc:0.5806873977086742 - ks:0.16330371755903672
6 - (20240112, 20240112)
249
249
valid eff, auc:0.5545918367346938 - ks:0.32142857142857145
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
5677
5677
oot eff, auc:0.6301418219196622 - ks:0.20780733874478274
1 - (20240117, 20240120)
7205
7205
oot eff, auc:0.6227355259581407 - ks:0.1710029899429193
2 - (20240121, 20240124)
7004
7004
oot eff, auc:0.5600018282789039 - ks:0.12016667809340986
3 - (20240125, 20240128)
7454
7454
oot eff, auc:0.5811318289246433 - ks:0.15053404235978549
4 - (20240129, 20240131)
4530
4530
oot eff, auc:0.6181216412897448 - ks:0.2102902485445589


'''