
import pandas as pd
import numpy as np

import math
from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

import pandas as pd
import numpy as np

import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns

from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

from sklearn.linear_model import LogisticRegression


from sklearn.metrics import classification_report,roc_auc_score,accuracy_score 
from xgboost.sklearn import XGBClassifier


#更标准版：
from sklearn import metrics
def ks_score_V2(y_true, y_pred):
    """Compute the Kolmogorov-Smirnov (KS) statistic of a binary scorer.

    KS = max over all thresholds of |TPR - FPR| on the ROC curve.

    :param y_true: array-like of binary labels (0/1)
    :param y_pred: array-like of predicted scores/probabilities
    :return: float, the KS statistic in [0, 1]
    """
    # NOTE: sklearn's roc_curve returns (fpr, tpr, thresholds) in that
    # order; the original code bound them as (tpr, fpr).  That was
    # harmless here because KS = max|tpr - fpr| is symmetric in the two
    # rates, but the names were misleading — fixed to the real order.
    fpr, tpr, threshold = metrics.roc_curve(np.array(y_true), y_pred)
    ks = abs(tpr - fpr).max()
    return ks


# 定义自动分箱函数---最优分箱
def mono_bin(Y, X, n ):
    """Automatic "optimal" monotonic binning of feature X against target Y.

    Starts from ``n`` quantile buckets and decreases the bucket count until
    the Spearman correlation between per-bucket mean(X) and mean(Y) is
    close to monotonic (|r| >= 0.95), then derives WOE / IV per bucket.

    Parameters
    ----------
    Y : pd.Series -- binary target (1 = target customer)
    X : pd.Series -- numeric feature to bin
    n : int       -- initial (maximum) number of quantile buckets

    Returns
    -------
    d4  : pd.DataFrame of per-bucket stats (min/max/sum/total/rate/woe/bktIV)
          sorted by bucket minimum
    iv  : float, total information value
    cut : list of bucket edges padded with -inf / +inf
    woe : list of per-bucket WOE values rounded to 3 decimals
    """
    r = 0  # initial Spearman correlation; forces at least one loop pass
#    good=Y.sum()   #好客户的人数
#    bad=Y.count()-good   #坏客户的人数
    good=Y.sum()   # number of target (label-1) customers
    bad=Y.count()-good  # number of non-target (label-0) customers

#    seXNorm,seYNorm,seXAbnorm,seYAbnorm = split_abnorm_ptns(X,Y)

    rowN = n  # NOTE(review): kept from the original but never used below

    # Core of the binning: let the data pick the optimal number of quantile
    # buckets instead of fixing it by hand.
    while np.abs(r) < 0.95: # loop exits once per-bucket means are (nearly) monotonic
#        X = seXNorm
#        Y = seYNorm

        d1 = pd.DataFrame({"X": X, "Y": Y, "Bucket": pd.qcut(X, n, duplicates="drop")}) # pd.qcut: equal-frequency split of X into n buckets; n is driven down by the Spearman test below
        d2 = d1.groupby('Bucket', as_index = True,  observed=True)
#        print("d2.mean().X:")
#        print(str(d2.mean().X))
#        print("d2.mean().Y:")
#        print(str(d2.mean().Y))

        r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)    # Spearman rank correlation of bucket means: the stopping criterion
#        print("r:" + str(r))
#        print("n:" + str(n))
        n = n - 1
#    print("d2:" + str(d2))
#    print("res n:" + str(n))
#    print("res r:" + str(r))
    if str(r) == 'nan':
      n = 1  # spearmanr returned NaN (e.g. binning collapsed to a single bucket): fall back to one interior cut

    d3 = pd.DataFrame(d2.X.min(), columns = ['min'])
    d3['min']=d2.min().X    # bucket lower edges (the actual split points)
    d3['max'] = d2.max().X  # bucket upper edges
    d3['sum'] = d2.sum().Y  # number of target customers per bucket
    d3['total'] = d2.count().Y  # total observations per bucket
    d3['rate'] = d2.mean().Y    # per-bucket target rate
    d3['woe']=np.log((d3['rate']/(1-d3['rate']))/(good/bad))  # weight of evidence vs. overall odds
    d3['goodattribute']=d3['sum']/good            # bucket share of all target customers
    d3['badattribute']=(d3['total']-d3['sum'])/bad  # bucket share of all non-target customers

    rawIvs = (d3['goodattribute']-d3['badattribute'])*d3['woe']
#    print("rawIvs:" + str(rawIvs))
    d3['bktIV'] = rawIvs  # per-bucket IV contribution

    iv=((d3['goodattribute']-d3['badattribute'])*d3['woe']).sum()   # total IV of the variable

#    dAbn = calc_abnorm_infos(seXAbnorm,seYAbnorm, good,bad)  # TODO
#    d3 = pd.concat([d3,dAbn],ignore_index=True)

    d4 = (d3.sort_values(by = 'min')).reset_index(drop=True)    # bucket table ordered by lower edge
    woe=list(d4['woe'].round(3))             # WOE list, rounded
    cut=[]    # cut holds the bucket boundary values
    cut.append(float('-inf'))  # open lower boundary
    for i in range(1,n+1):            # n interior cuts -> n+1 buckets
         qua=X.quantile(i/(n+1))     # quantile() gives the boundary value
         cut.append(round(qua,4))   # keep 4 decimals
    cut.append(float('inf')) # open upper boundary
#    print("d4:" + str(d4))
#    print("iv:" + str(iv))
#    print("cut:" + str(cut))
#    print("woe:" + str(woe))
    return d4,iv,cut,woe


def bin_frequency(x,y,n=10): # x: variable to bin, y: binary target, n: number of buckets
    """Equal-frequency binning of x against binary target y with WOE/IV.

    Returns
    -------
    IV   : float, total information value of x
    cut  : list of bucket lower edges padded with -inf / +inf
    WOEi : list of per-bucket WOE values rounded to 3 decimals
    d4   : pd.DataFrame of per-bucket stats, sorted by bucket minimum
    """
    total = y.count()       # 1. total sample count
    bad = y.sum()           # 2. "bad" (label-1) sample count
    good = total-bad        # 3. "good" (label-0) sample count
    if x.value_counts().shape[0]==2:    # 4. binary variable: just split into 2 groups
        d1 = pd.DataFrame({'x':x,'y':y,'bucket':pd.cut(x,2)})
    else:
        d1 = pd.DataFrame({'x':x,'y':y,'bucket':pd.qcut(x,n,duplicates='drop')})  # 5. pd.qcut: equal-frequency binning
#        d1 = pd.DataFrame({'x':x,'y':y,'bucket':pd.cut(x,n,duplicates='drop')})
    d2 = d1.groupby('bucket',as_index=True)  # 6. aggregate by bucket
    d3 = pd.DataFrame(d2.x.min(),columns=['min_bin'])
    d3['min_bin'] = d2.x.min()               # 7. bucket left edge
    d3['max_bin'] = d2.x.max()               # 8. bucket right edge
    d3['bad'] = d2.y.sum()                   # 9. bad samples per bucket
    d3['total'] = d2.y.count()               # 10. total samples per bucket
    d3['bad_rate'] = d3['bad']/d3['total']   # 11. bad rate within each bucket
    d3['badattr'] = d3['bad']/bad            # 12. bucket share of all bad samples
    d3['goodattr'] = (d3['total'] - d3['bad'])/good    # 13. bucket share of all good samples
    d3['WOEi'] = np.log(d3['badattr']/d3['goodattr'])  # 14. WOE per bucket
    IV = ((d3['badattr']-d3['goodattr'])*d3['WOEi']).sum()  # 15. total IV of the variable
    d3['IVi'] = (d3['badattr']-d3['goodattr'])*d3['WOEi']   # 16. IV contribution per bucket
    d4 = (d3.sort_values(by='min_bin')).reset_index(drop=True) # 17. order buckets by lower edge
    cut = []
    cut.append(float('-inf'))
    for i in d4.min_bin:
        cut.append(i)
    cut.append(float('inf'))
    WOEi = list(d4['WOEi'].round(3))
    return IV,cut,WOEi,d4


def cal_psi(actual, predict, bins=10):
    """Population Stability Index between two score distributions.

    Equal-width bin edges are derived from ``actual``'s min/max, opened
    to -inf/+inf at both ends; ``predict`` is histogrammed on the same
    edges.  A +1 is added to each bin count's numerator so that an empty
    bin cannot produce a zero (or undefined) rate.

    :param actual: array/Series of reference scores (e.g. train scores)
    :param predict: array/Series of comparison scores (e.g. test scores)
    :param bins: number of equal-width segments
    :return: (psi, psi_df) — the scalar PSI and the per-bin breakdown
    """
    lo = actual.min()
    hi = actual.max()
    width = (hi - lo) / bins
    # Interior edges, then open both ends.
    edges = [-float("inf")]
    for i in range(1, bins):
        edges.append(lo + i * width)
    edges.append(float("inf"))
    act_counts, _ = np.histogram(actual, bins=edges)
    pred_counts, _ = np.histogram(predict, bins=edges)
    psi_df = pd.DataFrame({"actual": act_counts, "predict": pred_counts})
    # +1 in the numerator guards against empty bins on either side.
    psi_df["actual_rate"] = (psi_df["actual"] + 1) / psi_df["actual"].sum()
    psi_df["predict_rate"] = (psi_df["predict"] + 1) / psi_df["predict"].sum()
    psi_df["psi"] = (psi_df["actual_rate"] - psi_df["predict_rate"]) * np.log(
        psi_df["actual_rate"] / psi_df["predict_rate"])
    return psi_df["psi"].sum(), psi_df


################

def stat_group_labs_rate(vSL):
  """Summarize one score bucket.

  ``vSL`` is a list of ((score, source_index), label) pairs.

  Returns (min_score, max_score, avg_score, positive_rate,
  positive_count, total_count) for the bucket.
  """
  bucket_scores = [item[0][0] for item in vSL]
  n_pos = sum(1 for item in vSL if item[1] > 0)
  n_total = len(vSL)
  # The original tracked running min/max seeded with 100000 / -100;
  # folding those sentinels into min()/max() reproduces the exact
  # same values, including the degenerate all-out-of-range cases.
  lo = min([100000] + bucket_scores)
  hi = max([-100] + bucket_scores)
  avg = sum(bucket_scores) / len(bucket_scores)
  return lo, hi, avg, n_pos / n_total, n_pos, n_total

def calc_cur_group_iv(numAllY0, numAllY1, numCurY0, numCurY1):
  """Information-value contribution of one bucket.

  :param numAllY0: global count of negative (label-0) samples
  :param numAllY1: global count of positive (label-1) samples
  :param numCurY0: negative samples in this bucket
  :param numCurY1: positive samples in this bucket
  :return: IV_i = (badattr - goodattr) * ln(badattr / goodattr)
  """
  bucket_total = numCurY0 + numCurY1
  bad_rate = numCurY1 / bucket_total   # bucket positive rate (computed, unused — kept for parity)
  bad_share = numCurY1 / numAllY1      # bucket share of all positives
  good_share = numCurY0 / numAllY0     # bucket share of all negatives
  woe = np.log(bad_share / good_share)
  return (bad_share - good_share) * woe


def calc_score_labs_bins_by_us(scores, labs, numGroups=10):  # equal-frequency binning (same item count per bucket)
  """Equal-frequency lift/IV report over model scores.

  Sorts (score, label) pairs by score descending and slices them into
  ``numGroups`` buckets of equal size (the last bucket absorbs the
  remainder).  For each bucket returns a tuple:

      (min_score, max_score, avg_score, count,
       positive_rate, lift, cumulative_lift, bucket_iv)

  all rounded to 4 decimals.
  """
  total = len(scores)
  per_group = int(total / numGroups)

  # Key each entry by (score, original position) so duplicate scores
  # remain distinct dictionary entries.
  score_lab = {}
  n_pos = 0
  for idx, lab in enumerate(labs):
    score_lab[(scores[idx], idx)] = lab
    if lab == 1:
      n_pos += 1
  n_neg = total - n_pos
  base_rate = n_pos / total  # overall positive rate — lift denominator

  # Descending by score; ties broken by label (also descending).
  ordered = sorted(score_lab.items(), key=lambda x: (x[0][0], x[1]), reverse=True)

  # First numGroups-1 buckets take per_group items each; the last one
  # takes everything that remains.
  bounds = [(g * per_group, (g + 1) * per_group) for g in range(numGroups - 1)]
  bounds.append(((numGroups - 1) * per_group, total))

  results = []
  seen = 0
  seen_pos = 0
  for beg, end in bounds:
    bucket = ordered[beg:end]
    mn, mx, avg, rate1, n1, n_all = stat_group_labs_rate(bucket)
    seen += n_all
    seen_pos += n1
    lift = rate1 / base_rate
    cum_lift = (seen_pos / seen) / base_rate
    iv = calc_cur_group_iv(n_neg, n_pos, n_all - n1, n1)
    results.append((round(mn, 4), round(mx, 4), round(avg, 4), len(bucket),
                    round(rate1, 4), round(lift, 4), round(cum_lift, 4), round(iv, 4)))

  return results
  
  

def calc_score_labs_bins_by_us_v2(scores, labs, numGroups=10): # equal-width binning (same score-range width per bucket)
  """Equal-width lift/IV report over model scores.

  Like calc_score_labs_bins_by_us, but bucket boundaries are equal-width
  slices of the score range rather than equal-count slices; bucket sizes
  therefore vary.  Emits diagnostic prints as a side effect.

  Returns a list of per-bucket tuples:
  (min_score, max_score, avg_score, count, positive_rate, lift,
   cumulative_lift, bucket_iv), all rounded to 4 decimals.
  """
  vRes = []

#  print("len(scores):" + str(len(scores)))
#  print("len(labs):" + str(len(labs)))
  numItems = len(scores)
#  numInGrp = int(numItems / numGroups)
#  print("numInGrp:" + str(numInGrp))

  # Key each entry by (score, original position) so duplicate scores
  # remain distinct dictionary entries.
  dScoreLab = {}
  numAllLab1 = 0
  numAllLabs = numItems
  sIdx = 0
  for l in labs:
    s = scores[sIdx]
    dScoreLab[(s, sIdx)] = l
    if l == 1:
      numAllLab1 += 1
    sIdx += 1

  numAllLab0 = numAllLabs - numAllLab1
#  print("numAllLab1:" + str(numAllLab1))
#  print("numAllLabs:" + str(numAllLabs))
#  print("numAllLab0:" + str(numAllLab0))

  rateAllLab1 = numAllLab1 / numAllLabs  # overall positive rate — lift denominator
#  print("rateAllLab1:" + str(rateAllLab1))

  # Sort descending by score, ties broken by label.
  sSls = sorted(dScoreLab.items(), key=lambda x: (x[0][0],x[1]),reverse=True)
#  print("sSls:" +str(sSls))
  # sSls example: [((0.14229898, 26025), 0), ((0.13870734, 18148), 0), ..., ((0.07274652, 16943), 0)]
  vsScores = []
  for kv in sSls:
    vsScores.append(kv[0][0])
  
  # kv[0][0] is the bare score, so these are scalars (the old sample
  # output below showing tuples was stale).
  maxScore = sSls[0][0][0]
  minScore = sSls[-1][0][0]
  print("maxScore:" + str(maxScore))
  print("minScore:" + str(minScore))
  lenBin = (maxScore-minScore) / numGroups
  print("lenBin:" + str(lenBin))
  cuts = [minScore + i * lenBin for i in range(1, numGroups)] # interior equal-width cut points
  cuts.insert(0, -float("inf"))
  cuts.append(float("inf"))
  print("cuts:" + str(cuts))
  actual_cuts = np.histogram(vsScores, bins=cuts) # equal-width histogram of the scores
#  predict_cuts = np.histogram(predict, bins=cuts)
  print("actual_cuts:" + str(actual_cuts))
  print(type(actual_cuts))
  vGroupsLen = actual_cuts[0].tolist()
  # histogram counts ascend with score, but sSls is sorted descending:
  # reverse so group sizes line up with the sorted list.
  vGroupsLen = vGroupsLen[::-1]  
   
  sIdx = 0
  sumLabs = 0
  sumLab1 = 0
  for gIdx in range(len(vGroupsLen)): # one slice of sSls per histogram bucket
    cGrpNum = vGroupsLen[gIdx]
    begIdx = sIdx
    endIdx = sIdx + cGrpNum
#    print("cur rng:" + str(begIdx) + "," + str(endIdx))

    vCurRng = sSls[begIdx : endIdx]
    minScore, maxScore, avgScore, rateLab1, numLab1, numLabs = stat_group_labs_rate(vCurRng)
    sumLabs += numLabs
    sumLab1 += numLab1
    curLift = rateLab1/rateAllLab1
    cumRateLab1 = sumLab1 / sumLabs
    cumLift = cumRateLab1 / rateAllLab1

    curIv = calc_cur_group_iv(numAllLab0, numAllLab1, numLabs-numLab1, numLab1)

    vRes.append((round(minScore,4), round(maxScore,4), round(avgScore,4), len(vCurRng), round(rateLab1,4), round(curLift,4), round(cumLift,4), round(curIv,4)))

    sIdx += cGrpNum

#  print("last rng:" + str(sIdx) + "," + str(numItems))

  return vRes
  


def calc_date_ranges_effects(serDt, vPreds, vLabs, nDaysInGroup, tag, fOut):
  """Print per-date-window AUC and KS for a set of predictions.

  Groups the distinct dates in ``serDt`` (sorted ascending) into windows
  of ``nDaysInGroup`` consecutive dates, then for each window prints the
  date range, sample count, AUC and KS of the predictions that fall in it.

  :param serDt: iterable of date keys, aligned by position with vPreds/vLabs
  :param vPreds: list of predicted scores
  :param vLabs: list of binary labels
  :param nDaysInGroup: number of distinct dates per window
  :param tag: label prepended to printed lines
  :param fOut: NOTE(review): accepted but never used in this function
  :return: None (output via print only)
  """
  print("in calc_date_ranges_effects ...")
  # Collect (pred, label) pairs per distinct date.
  dDtFreq = {}
  pIdx = 0
  for dt in serDt:
    cP = vPreds[pIdx]
    cL = vLabs[pIdx]
    if dt not in dDtFreq:
      dDtFreq[dt] = []
    dDtFreq[dt].append((cP,cL)) #= dDtFreq.get(dt, 0) + 1
    pIdx += 1
  sDtFreqs = sorted(dDtFreq.items(), key=lambda x: x[0],reverse=False)
  print(len(sDtFreqs))
  
  # Assign each date (in ascending order) to a window of nDaysInGroup dates.
  dIdx = 0
  dGrpPreds = {}
  dGrpDts = {}
  for kv in sDtFreqs:
#    print(tag+" dt f:" + str(kv[0]) + ", " + str(len(kv[1])))
    cGrpIdx = math.floor(dIdx/nDaysInGroup)
    if cGrpIdx not in dGrpPreds:
      dGrpPreds[cGrpIdx] = ([],[])  # (predictions, labels) for the window
      dGrpDts[cGrpIdx] = []
      
    for pp in kv[1]: # [0]:
      dGrpPreds[cGrpIdx][0].append(pp[0])
      dGrpPreds[cGrpIdx][1].append(pp[1])
#      dGrpPreds[cGrpIdx][0].append(pp) #kv[1][0])
#    for ll in kv[1][1]:
#      dGrpPreds[cGrpIdx][1].append(ll) #kv[1][1])
    dGrpDts[cGrpIdx].append(kv[0])
    dIdx += 1
    
  # Report AUC / KS per window.
  vGrpDtRng = []
  sGrpFreqs = sorted(dGrpPreds.items(), key=lambda x: x[0],reverse=False)
  for kv in sGrpFreqs:
    cGIdx = kv[0]
    dtRng = (dGrpDts[cGIdx][0], dGrpDts[cGIdx][-1])  # first/last date in the window
    print(str(cGIdx) + " - " + str(dtRng))
    vCPreds = kv[1][0]
    vCLabs = kv[1][1]
    print(len(vCPreds))
    print(len(vCLabs))
    
    cAUC = roc_auc_score(pd.Series(vCLabs), pd.Series(vCPreds))
    cKS = ks_score_V2(pd.Series(vCLabs), pd.Series(vCPreds))
    print(tag + ", auc:" + str(cAUC) + " - ks:" + str(cKS))
  

def calc_oot_preds_psis(serOotDt, serPreds, nDaysInGroup, tag,fOut):
  """Print/write PSI between consecutive date windows of predictions.

  Groups the distinct dates in ``serOotDt`` (sorted ascending) into
  windows of ``nDaysInGroup`` consecutive dates, then computes cal_psi
  between each window's scores and the next window's scores.

  :param serOotDt: iterable of date keys, aligned by position with serPreds
  :param serPreds: predicted scores
  :param nDaysInGroup: number of distinct dates per window
  :param tag: label prepended to every output line
  :param fOut: optional open file handle; when not None, output lines are
               also written to it
  :return: None (output via print / fOut)
  """
  # Collect predictions per distinct date.
  dDtFreq = {}
  pIdx = 0
  for dt in serOotDt:
    cP = serPreds[pIdx]
    if dt not in dDtFreq:
      dDtFreq[dt] = []
    dDtFreq[dt].append(cP) #= dDtFreq.get(dt, 0) + 1
    pIdx += 1
  sDtFreqs = sorted(dDtFreq.items(), key=lambda x: x[0],reverse=False)
  
  # Assign each date (ascending) to a window of nDaysInGroup dates.
  dIdx = 0
  dGrpPreds = {}
  dGrpDts = {}
  for kv in sDtFreqs:
#    print(tag+" dt f:" + str(kv[0]) + ", " + str(len(kv[1])))
    cGrpIdx = math.floor(dIdx/nDaysInGroup)
    if cGrpIdx not in dGrpPreds:
      dGrpPreds[cGrpIdx] = []
      dGrpDts[cGrpIdx] = []
    dGrpPreds[cGrpIdx].append(kv[1])  # list of per-date prediction lists
    dGrpDts[cGrpIdx].append(kv[0])
    dIdx += 1
    
  # Flatten each window's per-date lists into one score list and record
  # the (first_date, last_date) range of the window.
  vGrpPreds = []
  vGrpDtRng = []
  sGrpFreqs = sorted(dGrpPreds.items(), key=lambda x: x[0],reverse=False)
  #for gIdx in dGrpPreds:
  for kv in sGrpFreqs:
    gIdx = kv[0]
    cGDtsPreds = dGrpPreds[gIdx]
    cFInfo = ""
    cGPreds = []
    for cv in cGDtsPreds:
      cFInfo = cFInfo + str(len(cv)) + ";"
      for cp in cv:
        cGPreds.append(cp)
#    print(tag + ", " +str(gIdx) + ", " + cFInfo)
    vGrpPreds.append(cGPreds)
    vGrpDtRng.append((dGrpDts[gIdx][0], dGrpDts[gIdx][-1]))
  
  # PSI between each window and the next one.  NOTE(review): the group
  # header lines below deliberately contain a full-width comma "，".
  for gIdx in range(len(vGrpPreds)):
    print(tag + "， grp_" + str(gIdx) + ", " + str(len(vGrpPreds[gIdx])))
    if fOut is not None:
      fOut.write(tag + "， grp_" + str(gIdx) + ", " + str(len(vGrpPreds[gIdx])) + "\n")
    if gIdx < len(vGrpPreds)-1:
      cGPs = vGrpPreds[gIdx]
      nGPs = vGrpPreds[gIdx+1]
      cGRng = vGrpDtRng[gIdx]
      nGRng = vGrpDtRng[gIdx+1]
      psi,psidf = cal_psi(pd.Series(cGPs), pd.Series(nGPs))
      print(tag + ", psi of " + str(cGRng)+" vs " + str(nGRng) + " : " + str(psi))
      if fOut is not None:
        fOut.write(tag + ", psi of " + str(cGRng)+" vs " + str(nGRng) + " : " + str(psi) + "\n")




rawPtnsPath = "../恒昌12万_青禾苗.xlsx"

# Load the raw sample set and derive the application month (YYYYMM)
# from the apply_date column.
pdData = pd.read_excel(rawPtnsPath)
pdData['mn'] = pdData['apply_date'].apply(lambda x:str(x)[:6])


featPath = "D:/yrProj/20240612_众安15w/青禾苗-幸福消金-20240603_report.xlsx/银融_青禾苗_三版模型/银融_青禾苗_入模特征_子分1.1.txt"

# Read the model feature list, one feature name per line, skipping
# blanks.  Fixed: the original iterated open(featPath) directly and
# never closed the handle; the with-block guarantees it is released.
vFeats = []
with open(featPath) as fFeats:
  for f in fFeats:
    f = f.strip()
    if len(f) > 0:
      vFeats.append(f)




print("vFeats:")
print(vFeats)
print(len(vFeats))


#colLab = 'y1'
#colLab = 'y2'
colLab = 'y3'  # target column for this run; y1/y2/y3 are alternative label definitions — TODO confirm semantics

# Output paths for raw predicted probabilities: full sample set (w12)
# and blind test set (k49).
ptnProxPath = "./预估分/w12_" + colLab +"_prox"
testProxPath = "./预估分/k49_" + colLab +"_prox"



# Split samples by the pre-assigned 'flag' column.  For y3, keep only
# rows with a defined label (negative y3 presumably marks unlabeled
# rows — verify against the data dictionary).
pdPtn_train = pdData[pdData['flag'] == 'train']
if colLab == 'y3':
  pdPtn_train = pdPtn_train[pdPtn_train['y3']>=0]
  
pdPtn_valid = pdData[pdData['flag'] == 'valid']
if colLab == 'y3':
  pdPtn_valid = pdPtn_valid[pdPtn_valid['y3']>=0]

pdPtn_oot = pdData[pdData['flag'] == 'oot']
if colLab == 'y3':
  pdPtn_oot = pdPtn_oot[pdPtn_oot['y3']>=0]


# Label distribution sanity check for each split.
print("pdPtn_train.y3.value_counts():")
print(pdPtn_train.y3.value_counts())
print("pdPtn_valid.y3.value_counts():")
print(pdPtn_valid.y3.value_counts())
print("pdPtn_oot.y3.value_counts():")
print(pdPtn_oot.y3.value_counts())


# XGBoost hyper-parameters (presumably from an earlier grid search — see
# the commented alternative below).
dParams = {'learning_rate':0.02, 'n_estimators':100, 'max_depth':3, 'min_child_weight':100}
#dParams = {'learning_rate':0.02, 'n_estimators':100, 'max_depth':3, 'min_child_weight':60}

if True:
  learning_rate = dParams['learning_rate']
  n_estimators = dParams['n_estimators']
  max_depth = dParams['max_depth']
  min_child_weight = dParams['min_child_weight']
  
  # Echo the run configuration.
  print(">>>>>")
  print(colLab)
#  print(vFeats)
  print("learning_rate="+str(learning_rate))
  print("n_estimators="+str(n_estimators))
  print("max_depth="+str(max_depth))
  print("min_child_weight="+str(min_child_weight))

  train_x = pdPtn_train[vFeats]
  train_y = pdPtn_train[colLab]
  test_x = pdPtn_valid[vFeats]
  test_y = pdPtn_valid[colLab]


  # Build the classifier with the tuned parameters.
  xgb1 = XGBClassifier(
   learning_rate =learning_rate,
   n_estimators=n_estimators,
   max_depth=max_depth,
   min_child_weight=min_child_weight, #gamma=2.0, reg_lambda=2.0, reg_alpha=2.0, # subsample=0.8, colsample_bytree=0.8, 
#   eta=0.01,
   objective= 'binary:logistic',
   nthread=4,
#   scale_pos_weight=1,
   seed=27)


  print("参数寻优后训练：")
  # Fit on train; collect hard predictions and positive-class
  # probabilities for train/valid.
  xgb1.fit(train_x,train_y) 
  prex=xgb1.predict(test_x) 
  prox=xgb1.predict_proba(test_x)[:,1]
  prox_train=xgb1.predict_proba(train_x)[:,1]

  train_prox=xgb1.predict_proba(train_x)[:,1]  # NOTE(review): duplicates prox_train above
  print('train ROC: {}'.format(roc_auc_score(train_y,train_prox)))
  ks2_train = ks_score_V2(train_y,prox_train)
  print("ks2_train:" + str(ks2_train))


  print('test accuracy: {}'.format(accuracy_score(test_y,prex))) 
  print('test ROC: {}'.format(roc_auc_score(test_y,prox)))
  ks2_test = ks_score_V2(test_y,prox)
  print("ks2_test:" + str(ks2_test))


  # Out-of-time evaluation.
  #prex_oot = xgb1.predict(pdPtn_oot.drop(toDrop,axis=1)) 
  #prox_oot = xgb1.predict_proba(pdPtn_oot.drop(toDrop,axis=1))[:,1]
  prex_oot = xgb1.predict(pdPtn_oot[vFeats]) 
  prox_oot = xgb1.predict_proba(pdPtn_oot[vFeats])[:,1]
  print('oot accuracy: {}'.format(accuracy_score(pdPtn_oot[colLab], prex_oot))) 
  print('oot ROC: {}'.format(roc_auc_score(pdPtn_oot[colLab], prox_oot)))
  ks2_oot = ks_score_V2(pdPtn_oot[colLab],prox_oot)
  print("ks2_oot:" + str(ks2_oot))


  # Decile lift/IV table on the OOT scores.
  vSLStats = calc_score_labs_bins_by_us(prox_oot, pdPtn_oot[colLab], numGroups=10)
  for sl in vSLStats:
    print(sl)


  fi = xgb1.feature_importances_
  tc = list(train_x.columns)

  # Map feature name -> importance weight.
  dFws = {}
  for fIdx in range(len(fi)):
    cc = tc[fIdx]
    cw = fi[fIdx]
    dFws[cc] = cw

  sFMWs = sorted(dFws.items(), key=lambda x: x[1],reverse=True)

  # Report features with non-zero importance, descending.
  nF = 0
  for kv in sFMWs:
    ft = kv[0]
    wgt = kv[1]
    if wgt > 0.0:
      print("FI," + ft + "," + str(wgt))
      nF += 1
  print("nF:" + str(nF))
  
  # Score stability (PSI) between consecutive date windows within each split.
  calc_oot_preds_psis(pdPtn_oot['apply_date'], prox_oot, 4, "oot", None)

  calc_oot_preds_psis(pdPtn_valid['apply_date'], prox, 10, "test", None)
  calc_oot_preds_psis(pdPtn_train['apply_date'], prox_train, 10, "train", None)
#  calc_oot_preds_psis(pdData['apply_date'], prox_train, 10, None)

  # Cross-split score stability.
  psiTrainTest,_1 = cal_psi(pd.Series(prox_train), pd.Series(prox))
  psiTrainOot,_2 = cal_psi(pd.Series(prox_train), pd.Series(prox_oot))
  psiTestOot,_3 = cal_psi(pd.Series(prox), pd.Series(prox_oot))
  print("psiTrainTest:" + str(psiTrainTest))
  print("psiTrainOot:" + str(psiTrainOot))
  print("psiTestOot:" + str(psiTestOot))
  
  # AUC/KS per date window within each split.
  calc_date_ranges_effects(pdPtn_train['apply_date'], prox_train.tolist(), pdPtn_train[colLab].tolist(), 10, "train eff", None)
  calc_date_ranges_effects(pdPtn_valid['apply_date'], prox.tolist(), pdPtn_valid[colLab].tolist(), 10, "valid eff", None)
  calc_date_ranges_effects(pdPtn_oot['apply_date'], prox_oot.tolist(), pdPtn_oot[colLab].tolist(), 4, "oot eff", None)



# pdData
# ptnProxPath

# D:/yrProj/20240615_恒昌/恒昌样本/盲测49k_青禾苗特征.xlsx
# testProxPath

# Score the full sample set and dump the raw probabilities, one per line.
# Fixed: both output files were opened without a context manager, so the
# handles leaked if any write raised; with-blocks guarantee closure.
prox_ptn = xgb1.predict_proba(pdData[vFeats])[:,1]
with open(ptnProxPath, 'w') as fOutPtn:
  for pp in prox_ptn:
    fOutPtn.write(str(pp) + "\n")

# Score the blind test set and dump its probabilities the same way.
pdTest = pd.read_excel("D:/yrProj/20240615_恒昌/恒昌样本/盲测49k_青禾苗特征.xlsx") #, encoding='utf-8')
prox_test = xgb1.predict_proba(pdTest[vFeats])[:,1]
with open(testProxPath, 'w') as fOutTest:
  for pp in prox_test:
    fOutTest.write(str(pp) + "\n")

'''
>>>>>
y3   <<<<<<<<<<<<<<<<<
learning_rate=0.02
n_estimators=100
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.6372043842214821
ks2_train:0.19281414435040678
test accuracy: 0.9461170693888099
test ROC: 0.6012723555108386
ks2_test:0.14837973925694192
oot accuracy: 0.9529198047155887
oot ROC: 0.6001222245506739
ks2_oot:0.14821173005780225
(0.0776, 0.1201, 0.0852, 2642, 0.0806, 1.7124, 1.7124, 0.0429)
(0.0712, 0.0775, 0.074, 2642, 0.0598, 1.2702, 1.4913, 0.0072)
(0.0683, 0.0712, 0.0697, 2642, 0.0572, 1.214, 1.3989, 0.0046)
(0.0658, 0.0683, 0.0671, 2642, 0.0515, 1.0934, 1.3225, 0.0009)
(0.0633, 0.0658, 0.0646, 2642, 0.0473, 1.0049, 1.259, 0.0)
(0.0608, 0.0633, 0.0621, 2642, 0.0439, 0.9326, 1.2046, 0.0005)
(0.0581, 0.0608, 0.0595, 2642, 0.0466, 0.9889, 1.1738, 0.0)
(0.0549, 0.0581, 0.0565, 2642, 0.0318, 0.6753, 1.1115, 0.0139)
(0.0504, 0.0549, 0.0529, 2642, 0.0303, 0.6432, 1.0594, 0.0172)
(0.038, 0.0504, 0.0464, 2645, 0.0219, 0.4658, 1.0, 0.0443)
FI,S01_n23,0.028930366
FI,S05_n12,0.028513331
FI,S01_n17,0.026545469
FI,S03_n26,0.025838763
FI,S03_n23,0.025177572
FI,S03_n43,0.024471307
FI,S03_n65,0.02436549
FI,S02_n85,0.024142578
FI,S03_n27,0.023464475
FI,S03_n104,0.021602008
FI,S03_n75,0.021281539
FI,S03_n87,0.020527635
FI,S02_n88,0.019823512
FI,S02_n53,0.019529233
FI,S02_n48,0.019370735
FI,S03_n55,0.019284984
FI,S01_n22,0.019220296
FI,S03_n102,0.018961152
FI,S03_n86,0.018911691
FI,S02_n83,0.018856205
FI,S03_n92,0.018669063
FI,S03_n100,0.01853081
FI,S02_n79,0.018293956
FI,S01_n9,0.017730668
FI,S01_n10,0.017155066
FI,S02_n72,0.0171014
FI,S03_n68,0.016593844
FI,S03_n98,0.01636461
FI,S03_n48,0.015120477
FI,S03_n22,0.015045392
FI,S03_n44,0.014664154
FI,S01_n12,0.014174172
FI,S03_n41,0.014020492
FI,S02_n80,0.013934658
FI,S03_n80,0.013849485
FI,S03_n66,0.013624368
FI,S01_n11,0.013592861
FI,S03_n105,0.013591719
FI,S04_n14,0.01314831
FI,S02_n44,0.0128179295
FI,S03_n37,0.01241973
FI,S03_n70,0.012416981
FI,S03_n69,0.012347024
FI,S01_n13,0.012327175
FI,S03_n74,0.012262378
FI,S03_n49,0.011806845
FI,S02_n74,0.011766631
FI,S05_n5,0.011106119
FI,S02_n40,0.010955547
FI,S03_n84,0.010903392
FI,S03_n4,0.0108622005
FI,S03_n45,0.010717721
FI,S01_n21,0.010672517
FI,S03_n71,0.010586238
FI,S02_n84,0.0100915255
FI,S03_n10,0.010053135
FI,S03_n83,0.009388222
FI,S03_n19,0.00887936
FI,S01_n6,0.008034429
FI,S03_n40,0.007941956
FI,S05_n8,0.007867313
FI,S03_n51,0.007756131
FI,S03_n50,0.0056026373
FI,S05_n2,0.0033181026
FI,S03_n31,0.003074897
nF:65
oot， grp_0, 4549
oot, psi of (20240113, 20240116) vs (20240117, 20240120) : 0.005679332335293586
oot， grp_1, 5731
oot, psi of (20240117, 20240120) vs (20240121, 20240124) : 0.005696632724588767
oot， grp_2, 5928
oot, psi of (20240121, 20240124) vs (20240125, 20240128) : 0.0009855356505796004
oot， grp_3, 6335
oot, psi of (20240125, 20240128) vs (20240129, 20240131) : 0.01977392557640621
oot， grp_4, 3880
test， grp_0, 2203
test, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.006523345125331547
test， grp_1, 3778
test, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.003697587326710354
test， grp_2, 2210
test, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.017198389527578295
test， grp_3, 2279
test, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.004615070601783451
test， grp_4, 2167
test, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.008987414540253164
test， grp_5, 2642
test, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.06471761340408327
test， grp_6, 199
train， grp_0, 8817
train, psi of (20231113, 20231122) vs (20231123, 20231202) : 0.004101051641787814
train， grp_1, 14997
train, psi of (20231123, 20231202) vs (20231203, 20231212) : 0.004214933073081316
train， grp_2, 8703
train, psi of (20231203, 20231212) vs (20231213, 20231222) : 0.0042596298430771186
train， grp_3, 9283
train, psi of (20231213, 20231222) vs (20231223, 20240101) : 0.0034523918812789664
train， grp_4, 8906
train, psi of (20231223, 20240101) vs (20240102, 20240111) : 0.0018336486140089045
train， grp_5, 10408
train, psi of (20240102, 20240111) vs (20240112, 20240112) : 0.008534981174060829
train， grp_6, 805
psiTrainTest:0.0003006100791297465
psiTrainOot:0.0040844300112731135
psiTestOot:0.0032620038990091916
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
8817
8817
train eff, auc:0.6380323527624773 - ks:0.2023433227970498
1 - (20231123, 20231202)
14997
14997
train eff, auc:0.6442308846662601 - ks:0.21139425812394408
2 - (20231203, 20231212)
8703
8703
train eff, auc:0.6408262760050838 - ks:0.22170098334336746
3 - (20231213, 20231222)
9283
9283
train eff, auc:0.6261868950848721 - ks:0.1771954390395234
4 - (20231223, 20240101)
8906
8906
train eff, auc:0.6409925458264496 - ks:0.22001175709577883
5 - (20240102, 20240111)
10408
10408
train eff, auc:0.6294848057109267 - ks:0.18602294367570496
6 - (20240112, 20240112)
805
805
train eff, auc:0.6334280423956071 - ks:0.22589707572468398
in calc_date_ranges_effects ...
61
0 - (20231113, 20231122)
2203
2203
valid eff, auc:0.6026388058851988 - ks:0.1820448789094738
1 - (20231123, 20231202)
3778
3778
valid eff, auc:0.6271965226377392 - ks:0.19114253477873122
2 - (20231203, 20231212)
2210
2210
valid eff, auc:0.5736279553073447 - ks:0.1506449445380743
3 - (20231213, 20231222)
2279
2279
valid eff, auc:0.6110586350375435 - ks:0.18450587466322316
4 - (20231223, 20240101)
2167
2167
valid eff, auc:0.5853402140672783 - ks:0.15671273941735075
5 - (20240102, 20240111)
2642
2642
valid eff, auc:0.5936054580593279 - ks:0.17001699158203298
6 - (20240112, 20240112)
199
199
valid eff, auc:0.5590643274853802 - ks:0.17543859649122806
in calc_date_ranges_effects ...
19
0 - (20240113, 20240116)
4549
4549
oot eff, auc:0.604569529361581 - ks:0.1606690308712707
1 - (20240117, 20240120)
5731
5731
oot eff, auc:0.620756905116261 - ks:0.19604924908672705
2 - (20240121, 20240124)
5928
5928
oot eff, auc:0.5915288595526219 - ks:0.1464677667766776
3 - (20240125, 20240128)
6335
6335
oot eff, auc:0.5873872910329525 - ks:0.15921376544933125
4 - (20240129, 20240131)
3880
3880
oot eff, auc:0.5996852150698304 - ks:0.15756181140796527


'''