
import pandas as pd
import numpy as np

import math
from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

import pandas as pd
import numpy as np

import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns

from xgboost import XGBRegressor
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error
import xgboost as xgb

from sklearn.linear_model import LogisticRegression


from sklearn.metrics import classification_report,roc_auc_score,accuracy_score 
from xgboost.sklearn import XGBClassifier


# More standard KS implementation (via sklearn's ROC curve):
from sklearn import metrics
def ks_score_V2(y_true,y_pred):
    """Kolmogorov-Smirnov statistic: max |TPR - FPR| over all ROC thresholds.

    Parameters:
        y_true: binary ground-truth labels.
        y_pred: predicted scores/probabilities for the positive class.

    Returns:
        float KS statistic in [0, 1].

    Fix: sklearn's roc_curve returns (fpr, tpr, thresholds); the original
    unpacked them in swapped order.  The result was still correct because
    only the absolute difference is used, but the names are corrected so
    the code no longer misleads readers.
    """
    fpr, tpr, thresholds = metrics.roc_curve(np.array(y_true), y_pred)
    ks = abs(tpr - fpr).max()
    return ks


# Automatic "optimal" binning: shrink the bucket count until the bucket means
# of X and Y are near-monotonically related, then compute WOE/IV per bucket.
def mono_bin(Y, X, n ):
    """Monotonic optimal binning of feature X against binary target Y.

    Starting from n quantile buckets, the bucket count is reduced one at a
    time until the Spearman correlation between the per-bucket means of X
    and Y reaches |r| >= 0.95 (i.e. the bucket means are near-monotonic).
    WOE and IV are then computed for the final bucketing.

    Parameters:
        Y: binary target series (1 = target class, as counted in `good`).
        X: numeric feature series to bin.
        n: initial (maximum) number of quantile buckets.

    Returns:
        d4:  DataFrame of per-bucket stats sorted by lower edge
             (min/max/sum/total/rate/woe/goodattribute/badattribute/bktIV).
        iv:  total information value of the binning.
        cut: list of bin edges bracketed by -inf / +inf.
        woe: list of per-bucket WOE values rounded to 3 decimals.
    """
    r = 0  # initial Spearman correlation value (forces at least one loop pass)
    good=Y.sum()   # number of target-class samples (Y == 1)
    bad=Y.count()-good  # number of non-target samples

#    seXNorm,seYNorm,seXAbnorm,seYAbnorm = split_abnorm_ptns(X,Y)

    rowN = n  # NOTE(review): kept for parity but never used afterwards

  # Core of the binning: the machine picks the bucket edges for us by
  # shrinking n until the per-bucket means are near-monotonic.
    while np.abs(r) < 0.95:  # loop exits once |Spearman| >= 0.95
        d1 = pd.DataFrame({"X": X, "Y": Y, "Bucket": pd.qcut(X, n, duplicates="drop")}) # pd.qcut: equal-frequency split of X into n buckets
        d2 = d1.groupby('Bucket', as_index = True,  observed=True)

        r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)    # Spearman corr of bucket means is the stopping criterion
        n = n - 1
    if str(r) == 'nan':
      n = 1  # degenerate case: Spearman undefined (e.g. a single usable bucket)

    d3 = pd.DataFrame(d2.X.min(), columns = ['min'])
    d3['min']=d2.min().X    # bucket lower edge (these are the split points)
    d3['max'] = d2.max().X  # bucket upper edge
    d3['sum'] = d2.sum().Y  # target-class count inside the bucket
    d3['total'] = d2.count().Y  # total samples inside the bucket
    d3['rate'] = d2.mean().Y    # target rate inside the bucket
    d3['woe']=np.log((d3['rate']/(1-d3['rate']))/(good/bad))
    d3['goodattribute']=d3['sum']/good
    d3['badattribute']=(d3['total']-d3['sum'])/bad

    rawIvs = (d3['goodattribute']-d3['badattribute'])*d3['woe']  # per-bucket IV contribution
    d3['bktIV'] = rawIvs

    iv=((d3['goodattribute']-d3['badattribute'])*d3['woe']).sum()   # total IV

#    dAbn = calc_abnorm_infos(seXAbnorm,seYAbnorm, good,bad)  # TODO
#    d3 = pd.concat([d3,dAbn],ignore_index=True)

    d4 = (d3.sort_values(by = 'min')).reset_index(drop=True)    # buckets sorted by lower edge
    woe=list(d4['woe'].round(3))             # per-bucket WOE, 3 decimals
    cut=[]    # cut holds the bin edges
    cut.append(float('-inf'))  # prepend -inf
    # NOTE(review): edges are recomputed from quantiles of X using the final n,
    # not taken from d4['min'] -- presumably equivalent for clean data; verify.
    for i in range(1,n+1):            # n is the final split count, so n+1 bins
         qua=X.quantile(i/(n+1))     # quantile value becomes a bin edge
         cut.append(round(qua,4))   # keep 4 decimal places
    cut.append(float('inf')) # append +inf
    return d4,iv,cut,woe


def bin_frequency(x,y,n=10):
    """Equal-frequency binning of feature x against binary target y.

    Parameters:
        x: numeric feature series to bin.
        y: binary target series (1 = bad).
        n: number of equal-frequency bins (ignored when x is binary).

    Returns:
        IV:    total information value of the binning.
        cut:   bin edges bracketed by -inf / +inf.
        WOEi:  per-bin WOE values rounded to 3 decimals.
        d4:    per-bin stats DataFrame, sorted by bin lower edge.
    """
    total = y.count()          # overall sample count
    bad = y.sum()              # overall bad count
    good = total - bad         # overall good count

    # Binary features get exactly two buckets; otherwise equal-frequency qcut.
    if x.value_counts().shape[0] == 2:
        buckets = pd.cut(x, 2)
    else:
        buckets = pd.qcut(x, n, duplicates='drop')
    frame = pd.DataFrame({'x': x, 'y': y, 'bucket': buckets})
    grouped = frame.groupby('bucket', as_index=True)

    # Per-bucket statistics, one column at a time.
    stats_df = pd.DataFrame(grouped.x.min(), columns=['min_bin'])
    stats_df['min_bin'] = grouped.x.min()        # bucket lower edge
    stats_df['max_bin'] = grouped.x.max()        # bucket upper edge
    stats_df['bad'] = grouped.y.sum()            # bad count per bucket
    stats_df['total'] = grouped.y.count()        # sample count per bucket
    stats_df['bad_rate'] = stats_df['bad'] / stats_df['total']
    stats_df['badattr'] = stats_df['bad'] / bad                       # share of all bads
    stats_df['goodattr'] = (stats_df['total'] - stats_df['bad']) / good  # share of all goods
    stats_df['WOEi'] = np.log(stats_df['badattr'] / stats_df['goodattr'])
    IV = ((stats_df['badattr'] - stats_df['goodattr']) * stats_df['WOEi']).sum()
    stats_df['IVi'] = (stats_df['badattr'] - stats_df['goodattr']) * stats_df['WOEi']

    d4 = stats_df.sort_values(by='min_bin').reset_index(drop=True)  # order buckets by lower edge
    cut = [float('-inf')] + list(d4.min_bin) + [float('inf')]
    WOEi = list(d4['WOEi'].round(3))
    return IV, cut, WOEi, d4


def cal_psi(actual, predict, bins=10):
    """Population Stability Index between two score distributions.

    Bin edges are equal-width over the range of `actual`; `predict` is
    histogrammed against the same edges.  A +1 is added to each count's
    numerator so empty bins never produce a zero rate.

    :param actual: Array or Series of reference scores (e.g. train set).
    :param predict: Array or Series of comparison scores (e.g. test set).
    :param bins: number of equal-width segments.
    :return: (psi, psi_df) where psi is the total PSI (float) and psi_df
        holds per-bin counts, rates and PSI contributions.
    """
    lo = actual.min()                      # smallest reference score
    hi = actual.max()                      # largest reference score
    width = (hi - lo) / bins
    # Interior equal-width edges, bracketed by +/- inf to catch outliers.
    edges = [-float("inf")]
    edges.extend(lo + i * width for i in range(1, bins))
    edges.append(float("inf"))

    act_counts, _ = np.histogram(actual, bins=edges)
    pred_counts, _ = np.histogram(predict, bins=edges)
    psi_df = pd.DataFrame({'actual': act_counts, 'predict': pred_counts})

    # +1 in the numerator guards against zero rates in empty bins.
    psi_df['actual_rate'] = (psi_df['actual'] + 1) / psi_df['actual'].sum()
    psi_df['predict_rate'] = (psi_df['predict'] + 1) / psi_df['predict'].sum()
    psi_df['psi'] = (psi_df['actual_rate'] - psi_df['predict_rate']) * np.log(
        psi_df['actual_rate'] / psi_df['predict_rate'])
    return psi_df['psi'].sum(), psi_df


################

def stat_group_labs_rate(vSL):
    """Summarize one score bucket.

    Parameters:
        vSL: list of ((score, orig_index), label) pairs; label > 0 counts
             as a positive.

    Returns:
        (minScore, maxScore, avgScore, rateLab1, numLab1, numLabs)

    Fix: the original tracked min/max with sentinel start values of
    100000 / -100, which silently returns wrong bounds whenever every
    score lies outside that range; the true min()/max() of the observed
    scores is used now.
    """
    vScores = [kv[0][0] for kv in vSL]
    numLabs = len(vSL)
    numLab1 = sum(1 for kv in vSL if kv[1] > 0)

    minScore = min(vScores)
    maxScore = max(vScores)
    avgScore = sum(vScores) / numLabs
    rateLab1 = numLab1 / numLabs

    return minScore, maxScore, avgScore, rateLab1, numLab1, numLabs

def calc_cur_group_iv(numAllY0, numAllY1, numCurY0, numCurY1):
    """Information-value contribution of a single bucket.

    Parameters:
        numAllY0, numAllY1: global negative / positive counts.
        numCurY0, numCurY1: this bucket's negative / positive counts.

    Returns:
        (badattr - goodattr) * ln(badattr / goodattr) for the bucket.
    """
    allTotal = numAllY0 + numAllY1   # kept for parity with original (unused)
    curTotal = numCurY0 + numCurY1
    bad_rate = numCurY1 / curTotal   # bucket positive rate (diagnostic only)

    share_bad = numCurY1 / numAllY1    # bucket's share of all positives
    share_good = numCurY0 / numAllY0   # bucket's share of all negatives
    woe_i = np.log(share_bad / share_good)
    return (share_bad - share_good) * woe_i


def calc_score_labs_bins_by_us(scores, labs, numGroups=10):  # equal-frequency binning (same row count per bin)
  """Equal-frequency decile-style report over (score, label) pairs.

  Rows are sorted by score descending and split into numGroups bins with
  (as near as possible) equal counts; the last bin absorbs the remainder.

  Parameters:
      scores: positionally-indexable sequence of scores.
      labs:   iterable of binary labels aligned with scores.
      numGroups: number of equal-count bins.

  Returns:
      list of per-bin tuples:
      (minScore, maxScore, avgScore, count, rateLab1, lift, cumLift, iv),
      each value rounded to 4 decimals (count excepted).

  Fix: the original duplicated the whole per-group statistics block a
  second time for the remainder group after the loop; both paths are now
  one loop driven by a precomputed list of slice bounds.
  """
  numItems = len(scores)
  numInGrp = int(numItems / numGroups)

  # Key by (score, row index) so duplicate scores remain distinct dict keys.
  dScoreLab = {}
  numAllLab1 = 0
  for sIdx, lab in enumerate(labs):
    dScoreLab[(scores[sIdx], sIdx)] = lab
    if lab == 1:
      numAllLab1 += 1

  numAllLab0 = numItems - numAllLab1
  rateAllLab1 = numAllLab1 / numItems   # global positive rate (lift base)

  # Sort descending by score, ties broken by label.
  sSls = sorted(dScoreLab.items(), key=lambda x: (x[0][0], x[1]), reverse=True)

  # First numGroups-1 bins hold exactly numInGrp rows; the last bin takes
  # everything left over (so no row is dropped when the division is uneven).
  bounds = [(g * numInGrp, (g + 1) * numInGrp) for g in range(numGroups - 1)]
  bounds.append(((numGroups - 1) * numInGrp, numItems))

  vRes = []
  sumLabs = 0
  sumLab1 = 0
  for begIdx, endIdx in bounds:
    vCurRng = sSls[begIdx:endIdx]
    minScore, maxScore, avgScore, rateLab1, numLab1, numLabs = stat_group_labs_rate(vCurRng)
    sumLabs += numLabs
    sumLab1 += numLab1
    curLift = rateLab1 / rateAllLab1              # bin lift vs global rate
    cumLift = (sumLab1 / sumLabs) / rateAllLab1   # cumulative lift, top-down

    curIv = calc_cur_group_iv(numAllLab0, numAllLab1, numLabs - numLab1, numLab1)

    vRes.append((round(minScore, 4), round(maxScore, 4), round(avgScore, 4), len(vCurRng),
                 round(rateLab1, 4), round(curLift, 4), round(cumLift, 4), round(curIv, 4)))

  return vRes
  
  

def calc_score_labs_bins_by_us_v2(scores, labs, numGroups=10): # equal-width binning (each bin spans the same score range)
  """Equal-width score binning report.

  Splits [minScore, maxScore] into numGroups equal-width intervals, counts
  rows per interval with np.histogram, then walks the descending-sorted
  rows bin by bin reporting min/max/avg score, positive rate, lift,
  cumulative lift and IV contribution per bin.

  Returns a list of per-bin tuples:
      (minScore, maxScore, avgScore, count, rateLab1, lift, cumLift, iv).

  NOTE(review): an empty interval (count 0) would make
  stat_group_labs_rate divide by zero -- assumes every bin is non-empty.
  Debug prints are left in place.
  """
  vRes = []

  numItems = len(scores)

  # Key by (score, row index) so duplicate scores remain distinct dict keys.
  dScoreLab = {}
  numAllLab1 = 0
  numAllLabs = numItems
  sIdx = 0
  for l in labs:
    s = scores[sIdx]
    dScoreLab[(s, sIdx)] = l
    if l == 1:
      numAllLab1 += 1
    sIdx += 1

  numAllLab0 = numAllLabs - numAllLab1

  rateAllLab1 = numAllLab1 / numAllLabs  # global positive rate (lift base)

  # Sort descending by score, ties broken by label.
  sSls = sorted(dScoreLab.items(), key=lambda x: (x[0][0],x[1]),reverse=True)
  vsScores = []
  for kv in sSls:
    vsScores.append(kv[0][0])
  
  maxScore = sSls[0][0][0]   # first element of the descending sort
  minScore = sSls[-1][0][0]  # last element of the descending sort
  print("maxScore:" + str(maxScore))
  print("minScore:" + str(minScore))
  lenBin = (maxScore-minScore) / numGroups
  print("lenBin:" + str(lenBin))
  cuts = [minScore + i * lenBin for i in range(1, numGroups)]  # interior equal-width edges
  cuts.insert(0, -float("inf"))
  cuts.append(float("inf"))
  print("cuts:" + str(cuts))
  actual_cuts = np.histogram(vsScores, bins=cuts)  # per-interval row counts
  print("actual_cuts:" + str(actual_cuts))
  print(type(actual_cuts))
  vGroupsLen = actual_cuts[0].tolist()
  # histogram counts run from the lowest interval upward, but sSls is sorted
  # descending, so reverse the counts to walk bins from high score to low.
  vGroupsLen = vGroupsLen[::-1]  
   
  sIdx = 0
  sumLabs = 0
  sumLab1 = 0
  for gIdx in range(len(vGroupsLen)):
    cGrpNum = vGroupsLen[gIdx]  # rows falling in this equal-width interval
    begIdx = sIdx
    endIdx = sIdx + cGrpNum

    vCurRng = sSls[begIdx : endIdx]
    minScore, maxScore, avgScore, rateLab1, numLab1, numLabs = stat_group_labs_rate(vCurRng)
    sumLabs += numLabs
    sumLab1 += numLab1
    curLift = rateLab1/rateAllLab1              # bin lift vs global rate
    cumRateLab1 = sumLab1 / sumLabs
    cumLift = cumRateLab1 / rateAllLab1         # cumulative lift, top-down

    curIv = calc_cur_group_iv(numAllLab0, numAllLab1, numLabs-numLab1, numLab1)

    vRes.append((round(minScore,4), round(maxScore,4), round(avgScore,4), len(vCurRng), round(rateLab1,4), round(curLift,4), round(cumLift,4), round(curIv,4)))

    sIdx += cGrpNum

  return vRes
  


def calc_date_ranges_effects(serDt, vPreds, vLabs, nDaysInGroup, tag, fOut):
  """Print per-date-window AUC and KS.

  Rows are grouped by apply date, the sorted dates are chunked into
  windows of nDaysInGroup days, and AUC/KS are printed for each window.
  fOut is accepted for interface parity but never written to.
  """
  print("in calc_date_ranges_effects ...")

  # Collect (prediction, label) pairs per distinct date.
  byDate = {}
  for rowIdx, dt in enumerate(serDt):
    byDate.setdefault(dt, []).append((vPreds[rowIdx], vLabs[rowIdx]))
  orderedDays = sorted(byDate.items(), key=lambda kv: kv[0])
  print(len(orderedDays))

  # Chunk consecutive dates into windows of nDaysInGroup days each.
  grpPairs = {}
  grpDays = {}
  for dayIdx, (day, pairs) in enumerate(orderedDays):
    gIdx = math.floor(dayIdx / nDaysInGroup)
    if gIdx not in grpPairs:
      grpPairs[gIdx] = ([], [])
      grpDays[gIdx] = []
    for pred, lab in pairs:
      grpPairs[gIdx][0].append(pred)
      grpPairs[gIdx][1].append(lab)
    grpDays[gIdx].append(day)

  # Report metrics window by window, in chronological order.
  for gIdx, (preds, labels) in sorted(grpPairs.items(), key=lambda kv: kv[0]):
    dtRng = (grpDays[gIdx][0], grpDays[gIdx][-1])
    print(str(gIdx) + " - " + str(dtRng))
    print(len(preds))
    print(len(labels))

    cAUC = roc_auc_score(pd.Series(labels), pd.Series(preds))
    cKS = ks_score_V2(pd.Series(labels), pd.Series(preds))
    print(tag + ", auc:" + str(cAUC) + " - ks:" + str(cKS))
  

def calc_oot_preds_psis(serOotDt, serPreds, nDaysInGroup, tag, fOut):
  """Report PSI between consecutive date windows of predictions.

  Predictions are grouped by apply date, dates are chunked into windows of
  nDaysInGroup days, and the PSI between each window and the next is
  printed (and written to fOut when it is not None).
  """
  # Collect predictions per distinct date.
  byDate = {}
  for rowIdx, dt in enumerate(serOotDt):
    byDate.setdefault(dt, []).append(serPreds[rowIdx])
  orderedDays = sorted(byDate.items(), key=lambda kv: kv[0])

  # Chunk consecutive dates into windows of nDaysInGroup days each.
  grpDayPreds = {}
  grpDays = {}
  for dayIdx, (day, preds) in enumerate(orderedDays):
    gIdx = math.floor(dayIdx / nDaysInGroup)
    if gIdx not in grpDayPreds:
      grpDayPreds[gIdx] = []
      grpDays[gIdx] = []
    grpDayPreds[gIdx].append(preds)
    grpDays[gIdx].append(day)

  # Flatten each window's per-day lists and record its date range.
  vGrpPreds = []
  vGrpDtRng = []
  for gIdx, dayLists in sorted(grpDayPreds.items(), key=lambda kv: kv[0]):
    flat = []
    for dayPreds in dayLists:
      flat.extend(dayPreds)
    vGrpPreds.append(flat)
    vGrpDtRng.append((grpDays[gIdx][0], grpDays[gIdx][-1]))

  # Print window sizes and the PSI of each window vs the next one.
  for gIdx in range(len(vGrpPreds)):
    sizeLine = tag + "， grp_" + str(gIdx) + ", " + str(len(vGrpPreds[gIdx]))
    print(sizeLine)
    if fOut is not None:
      fOut.write(sizeLine + "\n")
    if gIdx < len(vGrpPreds) - 1:
      psi, psidf = cal_psi(pd.Series(vGrpPreds[gIdx]), pd.Series(vGrpPreds[gIdx + 1]))
      psiLine = tag + ", psi of " + str(vGrpDtRng[gIdx]) + " vs " + str(vGrpDtRng[gIdx + 1]) + " : " + str(psi)
      print(psiLine)
      if fOut is not None:
        fOut.write(psiLine + "\n")




# ---- Load samples and model configuration -------------------------------
# NOTE(review): hard-coded local paths; this script is environment-specific.
rawPtnsPath = "../恒昌10万_青禾苗.xlsx"

pdData = pd.read_excel(rawPtnsPath)
pdData['mn'] = pdData['apply_date'].apply(lambda x:str(x)[:6])  # yyyymm month key derived from apply_date
print(pdData)
print(pdData.columns.tolist())

featPath = "D:/yrProj/20240612_众安15w/青禾苗-幸福消金-20240603_report.xlsx/银融_青禾苗_三版模型/银融_青禾苗_入模特征_子分1.1.txt"

# Feature list file: one feature name per line, blank lines skipped.
vFeats = []
vTrainCols = []
for f in open(featPath):
  f = f.strip()
  if len(f) > 0:
    vFeats.append(f)
    vTrainCols.append(f)


print("vFeats:")
print(vFeats)
print(len(vFeats))


# Keep only rows whose label is defined (>= 0 filters out missing labels).
pdPtn1 = pdData[pdData['fpd15_flag']>=0]
pdPtn2 = pdData[pdData['mob3_15_flag']>=0]

print(pdPtn1[['fpd15_flag','mn']].value_counts())
print(pdPtn2[['mob3_15_flag','mn']].value_counts())

# Time-based split: train on applications up to 2024-03-15, OOT afterwards.
pdPtn_train = pdPtn1[pdPtn1['apply_date'] <= 20240315]
pdPtn_oot = pdPtn1[pdPtn1['apply_date'] > 20240315]
colLab = 'fpd15_flag'

# Alternative target (mob3_15) split, kept for reference:
#pdPtn_train = pdPtn2[pdPtn2['apply_date'] <= 20240115]
#pdPtn_oot = pdPtn2[pdPtn2['apply_date'] > 20240115]
#colLab = 'mob3_15_flag'

ptnProxPath = "./预估分/w10_" + colLab +"_prox"
testProxPath = "./预估分/k42_" + colLab +"_prox"

print(pdPtn_train[[colLab,'mn']].value_counts())
print(pdPtn_oot[[colLab,'mn']].value_counts())


#print("oot cols:")
#print(pdPtn_oot.columns.tolist())
#print(pdPtn_oot.apply_date.value_counts())
#exit()

# apply_date and the label ride along in the training frame so the
# downstream PSI / per-window effect reports can use them.
vTrainCols.append('apply_date')
vTrainCols.append(colLab)

# Hold out 30% of the training window as a validation ("test") set.
#train_x,test_x,train_y,test_y = train_test_split(pdPtn_train.drop(toDrop,axis=1), pdPtn_train[colLab], test_size=0.3, random_state=0)
train_x,test_x,train_y,test_y = train_test_split(pdPtn_train[vTrainCols], pdPtn_train[colLab], test_size=0.3, random_state=0)

print("train_x.columns:")
print(train_x.columns.tolist())
print(len(train_x.columns.tolist()))

# Hyper-parameters taken from an earlier tuning run.
dParams = {'learning_rate':0.01, 'n_estimators':50, 'max_depth':3, 'min_child_weight':100}

learning_rate = dParams['learning_rate']
n_estimators = dParams['n_estimators']
max_depth = dParams['max_depth']
min_child_weight = dParams['min_child_weight']
  
print(">>>>>")
print(colLab)
print("learning_rate="+str(learning_rate))
print("n_estimators="+str(n_estimators))
print("max_depth="+str(max_depth))
print("min_child_weight="+str(min_child_weight))

# Rebuild the classifier with the tuned parameters.
xgb1 = XGBClassifier(
 learning_rate =learning_rate,
 n_estimators=n_estimators,
 max_depth=max_depth,
 min_child_weight=min_child_weight, #gamma=2.0, reg_lambda=2.0, reg_alpha=2.0, # subsample=0.8, colsample_bytree=0.8, 
# eta=0.01,
 objective= 'binary:logistic',
 nthread=4,
# scale_pos_weight=1,
 seed=27)

print("参数寻优后训练：")
# Fit on the feature columns only; apply_date / label in train_x are for reporting.
xgb1.fit(train_x[vFeats],train_y) 
prex=xgb1.predict(test_x[vFeats])   # hard class predictions (validation)
prox=xgb1.predict_proba(test_x[vFeats])[:,1]   # positive-class probability (validation)
prox_train=xgb1.predict_proba(train_x[vFeats])[:,1]   # positive-class probability (train)


# NOTE(review): train_prox duplicates prox_train computed just above.
train_prox=xgb1.predict_proba(train_x[vFeats])[:,1]
print('train ROC: {}'.format(roc_auc_score(train_y,train_prox)))
ks2_train = ks_score_V2(train_y,prox_train)
print("ks2_train:" + str(ks2_train))


print('test accuracy: {}'.format(accuracy_score(test_y,prex))) 
print('test ROC: {}'.format(roc_auc_score(test_y,prox)))
ks2_test = ks_score_V2(test_y,prox)
print("ks2_test:" + str(ks2_test))


# Out-of-time evaluation.
#prex_oot = xgb1.predict(pdPtn_oot.drop(toDrop,axis=1)) 
#prox_oot = xgb1.predict_proba(pdPtn_oot.drop(toDrop,axis=1))[:,1]
prex_oot = xgb1.predict(pdPtn_oot[vFeats]) 
prox_oot = xgb1.predict_proba(pdPtn_oot[vFeats])[:,1]
print('oot accuracy: {}'.format(accuracy_score(pdPtn_oot[colLab], prex_oot))) 
print('oot ROC: {}'.format(roc_auc_score(pdPtn_oot[colLab], prox_oot)))
ks2_oot = ks_score_V2(pdPtn_oot[colLab],prox_oot)
print("ks2_oot:" + str(ks2_oot))


if True:   # diagnostics / reporting section
  
  # Equal-frequency decile report over the OOT predictions.
  vSLStats = calc_score_labs_bins_by_us(prox_oot, pdPtn_oot[colLab], numGroups=10)
  for sl in vSLStats:
    print(sl)


  # Map feature importances back to column names.
  # NOTE(review): train_x.columns includes apply_date and the label while the
  # model was fit on train_x[vFeats] only -- indices line up only because
  # vFeats is a prefix of train_x.columns; verify if the column set changes.
  fi = xgb1.feature_importances_
  tc = list(train_x.columns)

  dFws = {}
  for fIdx in range(len(fi)):
    cc = tc[fIdx]
    cw = fi[fIdx]
    dFws[cc] = cw

  sFMWs = sorted(dFws.items(), key=lambda x: x[1],reverse=True)

  # Log nonzero feature importances, largest first.
  nF = 0
  for kv in sFMWs:
    ft = kv[0]
    wgt = kv[1]
    if wgt > 0.0:
      print("FI," + ft + "," + str(wgt))
      nF += 1
  print("nF:" + str(nF))
  
  # Score stability (PSI) across consecutive date windows within each split.
  calc_oot_preds_psis(pdPtn_oot['apply_date'], prox_oot, 4, "oot", None)

  calc_oot_preds_psis(test_x['apply_date'], prox, 10, "test", None)
  calc_oot_preds_psis(train_x['apply_date'], prox_train, 10, "train", None)

  # Cross-split PSI of the score distributions.
  psiTrainTest,_1 = cal_psi(pd.Series(prox_train), pd.Series(prox))
  psiTrainOot,_2 = cal_psi(pd.Series(prox_train), pd.Series(prox_oot))
  psiTestOot,_3 = cal_psi(pd.Series(prox), pd.Series(prox_oot))
  print("psiTrainTest:" + str(psiTrainTest))
  print("psiTrainOot:" + str(psiTrainOot))
  print("psiTestOot:" + str(psiTestOot))
  
  # Per-window AUC/KS over time for each split.
  calc_date_ranges_effects(train_x['apply_date'], prox_train.tolist(), train_x[colLab].tolist(), 10, "train eff", None)
  calc_date_ranges_effects(test_x['apply_date'], prox.tolist(), test_x[colLab].tolist(), 10, "valid eff", None)
  calc_date_ranges_effects(pdPtn_oot['apply_date'], prox_oot.tolist(), pdPtn_oot[colLab].tolist(), 4, "oot eff", None)



# ---- Export scores ------------------------------------------------------
# Full-sample scores -> ptnProxPath ; blind-test (42k) scores -> testProxPath.
# Fix: the original used bare open()/close() pairs, leaking the file handle
# if any write raised; `with` blocks now guarantee the files are closed.

# Score the full sample and persist one probability per line.
prox_ptn = xgb1.predict_proba(pdData[vFeats])[:,1]
with open(ptnProxPath, 'w') as fOutPtn:
  for pp in prox_ptn:
    fOutPtn.write(str(pp) + "\n")

# Score the blind-test sample and persist likewise.
pdTest = pd.read_excel("D:/yrProj/20240615_恒昌/恒昌样本/盲测42k_青禾苗特征.xlsx")
prox_test = xgb1.predict_proba(pdTest[vFeats])[:,1]
with open(testProxPath, 'w') as fOutTest:
  for pp in prox_test:
    fOutTest.write(str(pp) + "\n")



'''
>>>>>
fpd15_flag    <<<<<<<<<<<<<<
learning_rate=0.01
n_estimators=50
max_depth=3
min_child_weight=100
参数寻优后训练：
train ROC: 0.5972702983072936
ks2_train:0.13857569214513082
test accuracy: 0.8986573263280794
test ROC: 0.5592372005611139
ks2_test:0.1037134283570893
oot accuracy: 0.9047244782115099
oot ROC: 0.5519261667061851
ks2_oot:0.09038208366805983
(0.1489, 0.1691, 0.1556, 1145, 0.1406, 1.4758, 1.4758, 0.0232)
(0.1458, 0.1489, 0.1472, 1145, 0.1144, 1.2008, 1.3383, 0.0045)
(0.1444, 0.1458, 0.145, 1145, 0.1048, 1.1, 1.2589, 0.0012)
(0.1424, 0.1444, 0.1434, 1145, 0.0882, 0.9258, 1.1756, 0.0007)
(0.1407, 0.1424, 0.1416, 1145, 0.083, 0.8708, 1.1147, 0.0022)
(0.1366, 0.1407, 0.1385, 1145, 0.0969, 1.0175, 1.0985, 0.0)
(0.1347, 0.1366, 0.1355, 1145, 0.0812, 0.8525, 1.0633, 0.0029)
(0.1337, 0.1347, 0.134, 1145, 0.0786, 0.825, 1.0335, 0.0041)
(0.1319, 0.1337, 0.1327, 1145, 0.0882, 0.9258, 1.0216, 0.0007)
(0.1292, 0.1319, 0.1307, 1146, 0.0768, 0.806, 1.0, 0.0051)
FI,S03_n46,0.07412061
FI,S02_n88,0.07387588
FI,S02_n65,0.056970544
FI,S03_n22,0.042959247
FI,S05_n9,0.04205018
FI,S03_n35,0.040321413
FI,S04_n21,0.040301565
FI,S02_n48,0.039065108
FI,S03_n106,0.03770583
FI,S03_n20,0.035732724
FI,S03_n29,0.0343632
FI,S03_n45,0.03431518
FI,S03_n11,0.033913262
FI,S03_n100,0.03337608
FI,S03_n25,0.028344577
FI,S03_n64,0.027850095
FI,S05_n2,0.027540242
FI,S03_n28,0.02718965
FI,S03_n50,0.025394863
FI,S01_n11,0.024495462
FI,S02_n83,0.024467414
FI,S03_n32,0.023731463
FI,S03_n51,0.022928096
FI,S01_n9,0.022492182
FI,S03_n7,0.021816896
FI,S03_n47,0.021572204
FI,S03_n4,0.021202782
FI,S01_n16,0.020746185
FI,S03_n70,0.020589367
FI,S04_n22,0.020567693
nF:30
oot， grp_0, 2458
oot, psi of (20240316, 20240319) vs (20240320, 20240323) : 0.012043946665277302
oot， grp_1, 2751
oot, psi of (20240320, 20240323) vs (20240324, 20240327) : 0.026076468032149188
oot， grp_2, 2710
oot, psi of (20240324, 20240327) vs (20240328, 20240331) : 0.10727528027822626
oot， grp_3, 3532
test， grp_0, 2113
test, psi of (20240201, 20240210) vs (20240211, 20240220) : 0.029409239407678388
test， grp_1, 1961
test, psi of (20240211, 20240220) vs (20240221, 20240301) : 0.019177802277231426
test， grp_2, 1861
test, psi of (20240221, 20240301) vs (20240302, 20240311) : 0.018247673697499835
test， grp_3, 1961
test, psi of (20240302, 20240311) vs (20240312, 20240315) : 0.01977585316229401
test， grp_4, 669
train， grp_0, 5015
train, psi of (20240201, 20240210) vs (20240211, 20240220) : 0.005751886890550901
train， grp_1, 4293
train, psi of (20240211, 20240220) vs (20240221, 20240301) : 0.013170697597993563
train， grp_2, 4686
train, psi of (20240221, 20240301) vs (20240302, 20240311) : 0.006657096350498805
train， grp_3, 4422
train, psi of (20240302, 20240311) vs (20240312, 20240315) : 0.007439670860694535
train， grp_4, 1568
psiTrainTest:0.0009105929275505782
psiTrainOot:0.0365487789124003
psiTestOot:0.036328448559228024
in calc_date_ranges_effects ...
44
0 - (20240201, 20240210)
5015
5015
train eff, auc:0.6063338031798553 - ks:0.15944166826390027
1 - (20240211, 20240220)
4293
4293
train eff, auc:0.5913545915214435 - ks:0.1487882158627431
2 - (20240221, 20240301)
4686
4686
train eff, auc:0.6103590330075479 - ks:0.1633344801661633
3 - (20240302, 20240311)
4422
4422
train eff, auc:0.5793235384144475 - ks:0.1146103896103896
4 - (20240312, 20240315)
1568
1568
train eff, auc:0.5997690477840135 - ks:0.14567541975565257
in calc_date_ranges_effects ...
44
0 - (20240201, 20240210)
2113
2113
valid eff, auc:0.5690984389933099 - ks:0.14788884260053425
1 - (20240211, 20240220)
1961
1961
valid eff, auc:0.532630064732678 - ks:0.09674471882575458
2 - (20240221, 20240301)
1861
1861
valid eff, auc:0.5736689887671885 - ks:0.12875417908151132
3 - (20240302, 20240311)
1961
1961
valid eff, auc:0.5585879618153675 - ks:0.09681666198447719
4 - (20240312, 20240315)
669
669
valid eff, auc:0.5794361481638943 - ks:0.1276239570601052
in calc_date_ranges_effects ...
16
0 - (20240316, 20240319)
2458
2458
oot eff, auc:0.5834752820046938 - ks:0.13394655159361046
1 - (20240320, 20240323)
2751
2751
oot eff, auc:0.5477099045918503 - ks:0.09710211070543401
2 - (20240324, 20240327)
2710
2710
oot eff, auc:0.5553351726067691 - ks:0.09994181890152037
3 - (20240328, 20240331)
3532
3532
oot eff, auc:0.5428783413973861 - ks:0.09978603286434315



>>>>>
fpd15_flag
learning_rate=0.01
n_estimators=60
max_depth=3
min_child_weight=100

train ROC: 0.6002560482380455
ks2_train:0.14474406896713382
test accuracy: 0.8986573263280794
test ROC: 0.5586696205176593
ks2_test:0.10108837065611176
oot accuracy: 0.9047244782115099
oot ROC: 0.5536203104374507
ks2_oot:0.09232532584961547
(0.1458, 0.1699, 0.1532, 1145, 0.1467, 1.54, 1.54, 0.0293)
(0.1425, 0.1458, 0.1441, 1145, 0.1039, 1.0908, 1.3154, 0.001)
(0.1407, 0.1425, 0.1415, 1145, 0.1074, 1.1275, 1.2528, 0.0019)
(0.1385, 0.1407, 0.1396, 1145, 0.0917, 0.9625, 1.1802, 0.0002)
(0.1363, 0.1385, 0.1375, 1145, 0.0847, 0.8892, 1.122, 0.0016)
(0.1326, 0.1363, 0.1343, 1145, 0.0961, 1.0083, 1.1031, 0.0)
(0.1301, 0.1326, 0.1312, 1145, 0.0847, 0.8892, 1.0725, 0.0016)
(0.1286, 0.1301, 0.1295, 1145, 0.0769, 0.8067, 1.0393, 0.005)
(0.1264, 0.1286, 0.1278, 1145, 0.0751, 0.7883, 1.0114, 0.0061)
(0.1224, 0.1264, 0.1248, 1146, 0.0855, 0.8976, 1.0, 0.0013)
FI,S02_n88,0.06484052
FI,S03_n46,0.06456399
FI,S02_n65,0.04938599
FI,S03_n66,0.04104959
FI,S03_n22,0.038408827
FI,S05_n9,0.037615966
FI,S03_n35,0.03609921
FI,S04_n21,0.03564861
FI,S02_n48,0.034789693
FI,S04_n16,0.034729443
FI,S03_n100,0.03416931
FI,S03_n106,0.033579838
FI,S03_n20,0.031985015
FI,S03_n90,0.03189819
FI,S03_n29,0.030764904
FI,S03_n11,0.030534582
FI,S03_n45,0.029472968
FI,S03_n25,0.025241109
FI,S03_n64,0.02493381
FI,S03_n28,0.024258314
FI,S05_n2,0.02404586
FI,S01_n11,0.022353275
FI,S02_n83,0.021905342
FI,S03_n50,0.02181494
FI,S03_n32,0.021246454
FI,S03_n51,0.02063424
FI,S01_n9,0.020136943
FI,S03_n7,0.019714486
FI,S03_n47,0.019711291
FI,S03_n4,0.018982561
FI,S01_n16,0.018573776
FI,S03_n70,0.018497014
FI,S04_n22,0.018413976
nF:33
oot， grp_0, 2458
oot, psi of (20240316, 20240319) vs (20240320, 20240323) : 0.011498579191125782
oot， grp_1, 2751
oot, psi of (20240320, 20240323) vs (20240324, 20240327) : 0.025328280744208925
oot， grp_2, 2710
oot, psi of (20240324, 20240327) vs (20240328, 20240331) : 0.10321379008985095
oot， grp_3, 3532
test， grp_0, 2113
test, psi of (20240201, 20240210) vs (20240211, 20240220) : 0.025569858829494283
test， grp_1, 1961
test, psi of (20240211, 20240220) vs (20240221, 20240301) : 0.016974216784039365
test， grp_2, 1861
test, psi of (20240221, 20240301) vs (20240302, 20240311) : 0.011370090621601327
test， grp_3, 1961
test, psi of (20240302, 20240311) vs (20240312, 20240315) : 0.02536072239950397
test， grp_4, 669
train， grp_0, 5015
train, psi of (20240201, 20240210) vs (20240211, 20240220) : 0.0035562216159264306
train， grp_1, 4293
train, psi of (20240211, 20240220) vs (20240221, 20240301) : 0.013536686800648661
train， grp_2, 4686
train, psi of (20240221, 20240301) vs (20240302, 20240311) : 0.004561568801833508
train， grp_3, 4422
train, psi of (20240302, 20240311) vs (20240312, 20240315) : 0.01292171348131304
train， grp_4, 1568
psiTrainTest:0.0009474672071946594
psiTrainOot:0.04018144254479326
psiTestOot:0.0364533134029481
in calc_date_ranges_effects ...
44
0 - (20240201, 20240210)
5015
5015
train eff, auc:0.6096730913669599 - ks:0.1603857938220532
1 - (20240211, 20240220)
4293
4293
train eff, auc:0.5956809474050854 - ks:0.15381231671554252
2 - (20240221, 20240301)
4686
4686
train eff, auc:0.6119226546206744 - ks:0.16566637358716568
3 - (20240302, 20240311)
4422
4422
train eff, auc:0.5824164261664262 - ks:0.12658730158730164
4 - (20240312, 20240315)
1568
1568
train eff, auc:0.6007829280121944 - ks:0.15097808263470291
in calc_date_ranges_effects ...
44
0 - (20240201, 20240210)
2113
2113
valid eff, auc:0.5693545225083931 - ks:0.13920405812728204
1 - (20240211, 20240220)
1961
1961
valid eff, auc:0.5330482964383707 - ks:0.09209355603505687
2 - (20240221, 20240301)
1861
1861
valid eff, auc:0.5758197294613007 - ks:0.15178284163554212
3 - (20240302, 20240311)
1961
1961
valid eff, auc:0.5553283316532536 - ks:0.10313761781215058
4 - (20240312, 20240315)
669
669
valid eff, auc:0.5725407875856937 - ks:0.12230961364723392
in calc_date_ranges_effects ...
16
0 - (20240316, 20240319)
2458
2458
oot eff, auc:0.5874659323188735 - ks:0.12541827541827538
1 - (20240320, 20240323)
2751
2751
oot eff, auc:0.5466372291661115 - ks:0.10068491325320472
2 - (20240324, 20240327)
2710
2710
oot eff, auc:0.5580463094244849 - ks:0.09176916606523453
3 - (20240328, 20240331)
3532
3532
oot eff, auc:0.5448086696624869 - ks:0.10073690449667277

'''