import pandas as pd
import os
import numpy as np
from datetime import datetime, timedelta
from sklearn.metrics import roc_auc_score, roc_curve, precision_score, recall_score, f1_score

def calculate_bad_ratio(y):
    """Return the percentage of bad samples (label == 1) in *y*."""
    bad_count = (y == 1).sum()
    return bad_count / len(y) * 100

def load_data(folder_dir, sep):
    """Read every ``*.csv`` file in *folder_dir* and concatenate them row-wise.

    Parameters
    ----------
    folder_dir : str
        Directory containing the CSV files.
    sep : str
        Field separator. A multi-character separator is escaped character
        by character because pandas treats multi-char separators as a
        regular expression.

    Returns
    -------
    pandas.DataFrame
        Row-wise concatenation of all CSV files found.

    Raises
    ------
    FileNotFoundError
        If *folder_dir* contains no ``.csv`` files (previously this fell
        through to an opaque ``ValueError`` from ``pd.concat([])``).
    """
    # pandas interprets a multi-character sep as a regex; escape it literally.
    if len(sep) > 1:
        sep = ''.join('\\' + ch for ch in sep)
    csv_files = [name for name in os.listdir(folder_dir) if name.endswith(".csv")]
    if not csv_files:
        raise FileNotFoundError(f"no .csv files found in {folder_dir}")
    frames = [
        pd.read_csv(os.path.join(folder_dir, name), sep=sep, encoding='utf8')
        for name in csv_files
    ]
    return pd.concat(frames, axis=0)

def merge_and_deduplicate():
    """Merge the two tax-fraud source files and drop duplicate rows.

    Returns:
        The deduplicated DataFrame; also written (tab-separated) to
        DNNFraud/dataOut/merged_TaxFraud_data.txt.
    """
    # Read both tab-separated source files.
    df1 = pd.read_csv('DNNFraud/dataIn/Q4税务欺诈.txt', sep='\t')
    df2 = pd.read_csv('DNNFraud/dataIn/税务欺诈1128.txt', sep='\t')
    
    # Record counts before processing.
    print(f"Q4税务欺诈.txt 记录数: {len(df1)}")
    print(f"税务欺诈1128.txt 记录数: {len(df2)}")
    print(f"处理前总记录数: {len(df1) + len(df2)}")
    
    # Stack the two frames and drop exact duplicate rows.
    df_merged = pd.concat([df1, df2], axis=0)
    df_deduped = df_merged.drop_duplicates()
    
    # Record counts after deduplication.
    print(f"\n去重后记录数: {len(df_deduped)}")
    print(f"删除的重复记录数: {len(df_merged) - len(df_deduped)}")
    
    # Persist the result.
    df_deduped.to_csv('DNNFraud/dataOut/merged_TaxFraud_data.txt', sep='\t', index=False)
    # Bug fix: the message previously said merged_fraud_data.txt, which did
    # not match the file actually written above.
    print("\n合并后的数据已保存到 dataOut/merged_TaxFraud_data.txt")
    
    return df_deduped

def process_excel():
    """Load the concentration-application sheet and keep rows whose
    bad-debt amount (不良额) is non-zero.

    Returns:
        The filtered DataFrame; also written to
        DNNFraud/dataOut/filtered_bad_debt.xlsx.
    """
    source_path = 'DNNFraud/dataIn/王恣懿集中申请分析2025年1月向下取整.xlsx'
    df = pd.read_excel(source_path, sheet_name='生效的集中点位申请明细')

    # Keep only records with a non-zero bad-debt amount.
    nonzero_mask = df['不良额'] != 0
    df_filtered = df[nonzero_mask]

    # Record counts before and after filtering.
    print(f"\n原始记录数: {len(df)}")
    print(f"不良额非0的记录数: {len(df_filtered)}")

    # Persist the filtered rows.
    df_filtered.to_excel('DNNFraud/dataOut/filtered_bad_debt.xlsx', index=False)
    print("\n已将不良额非0的记录保存到 dataOut/filtered_bad_debt.xlsx")

    return df_filtered

def process_lake_data():
    """Load the data-lake application records and force flag_all = 1 for a
    hand-curated anti-fraud list.

    Returns:
        DataFrame with flag_all overwritten to 1 for the manually flagged
        app_num values.
    """
    # Field separator used by the lake export: DEL (0x7f) followed by '^'.
    sep = '\x7f\x5e'
    
    # Read the data-lake export files.
    folder_dir = 'DNNFraud/dataIn/2025-02-09'
    df = load_data(folder_dir, sep)
    
    # Basic shape / label-distribution diagnostics.
    print("\n数据湖数据信息:")
    print(f"记录数: {len(df)}")
    print(f"列数: {len(df.columns)}")
    print("\n列名:")
    print(df.columns.tolist())
    print(df['flag_all'].value_counts(dropna=True).sort_index())
    
    # Manually labelled fraud applications (hand-curated anti-fraud list
    # supplied by Chen Xiaojiao). Bug fix: the original list contained
    # '20230462433799' twice; the duplicate is removed here. isin() is
    # unaffected either way, so behavior is unchanged.
    special_app_nums = [
        '20240521392393', '20240487866120', '20230470576970',
        '20230456483305', '20230455796413', '20230453767992',
        '20230426795275', '20240531972856', '20230463118692',
        '20230463918576', '20230462433799', '20230443431811',
        '20230446521241', '20230413925989', '20230476647475'
    ]
    
    # Mark these applications as bad.
    df.loc[df['app_num'].isin(special_app_nums), 'flag_all'] = 1
    
    # Label distribution after the manual overrides.
    print("\n更新后的flag_all分布:")
    print(df['flag_all'].value_counts(dropna=True).sort_index())
    
    return df

def calculate_ks_auc(df, score_col, label_col):
    """Compute the KS statistic and AUC of a score against a binary label.

    Args:
        df: DataFrame holding both columns. Missing scores are expected to
            have been filled upstream, so no rows are dropped here.
        score_col: name of the score column.
        label_col: name of the 0/1 label column.

    Returns:
        Tuple ``(ks, auc)``.
    """
    labels = df[label_col]
    scores = df[score_col]

    auc = roc_auc_score(labels, scores)

    # KS is the maximum vertical gap between the TPR and FPR curves.
    fpr, tpr, _ = roc_curve(labels, scores)
    ks = np.abs(tpr - fpr).max()

    return ks, auc

def load_lost_contact_data():
    """Return the list of customer numbers flagged as lost contact.

    A customer counts as lost when CALL_STATUS_LEV1 == 1 and the record is
    still effective (END_DT == '3000-12-31').
    """
    # Lake export separator: DEL (0x7f) followed by '^'.
    sep = '\x7f\x5e'
    folder_dir = 'DNNFraud/dataIn/pxwzb_pub__MANUAL_CALL_lost/2025-03-05'
    lost_df = load_data(folder_dir, sep)

    print(lost_df.columns.tolist())

    # Effective lost-contact records only.
    is_lost = lost_df['CALL_STATUS_LEV1'] == 1
    is_effective = lost_df['END_DT'] == '3000-12-31'
    lost_df = lost_df[is_lost & is_effective]

    # Unique customer numbers meeting both conditions.
    return lost_df['CUST_NO'].unique().tolist()

def load_business_change_data():
    """Load business-registration change records and keep changes filed
    within two years before each row's own application date.

    Returns:
        DataFrame filtered to changes whose registration date (ESDATE)
        falls in [APPLY_DATE - 2 years, APPLY_DATE].
    """
    # Read the raw change files with the shared loader.
    folder_dir = 'DNNFraud/dataIn/2025-2-28'
    sep = '\x7f\x5e'
    df = load_data(folder_dir, sep)
    
    # Map the Chinese source column names to canonical English names.
    column_mapping = {
        '主申请人统一社会信用代码': 'CREDIT_CODE',
        '申请编号': 'APPLY_NO',
        '申请日期': 'APPLY_DATE',
        '变更日期': 'ALTDATE',
        '变更事项': 'ALTITEM',
        '登记日期': 'ESDATE'
    }
    
    # Apply the renaming in place.
    df.rename(columns=column_mapping, inplace=True)
    
    # Parse the date columns.
    df['ESDATE'] = pd.to_datetime(df['ESDATE'])
    df['ALTDATE'] = pd.to_datetime(df['ALTDATE'])
    df['APPLY_DATE'] = pd.to_datetime(df['APPLY_DATE'])
    
    # Two-year lookback window measured from each row's own APPLY_DATE
    # (an earlier fixed-cutoff variant is kept below for reference).
    # latest_date = pd.to_datetime('2024-12-31')
    two_years_ago = df['APPLY_DATE'] - timedelta(days=365*2)
    
    # Keep changes registered within the two years before the application.
    df_recent = df[(df['ESDATE'] >= two_years_ago) &  (df['ESDATE'] <= (df['APPLY_DATE']))]
    
    # Report before/after record counts.
    print(f"\n=== 工商变更分析结果 ===")
    print(f"处理前总记录数: {len(df)}")
    print(f"近2年记录数: {len(df_recent)}")
    return df_recent

def process_business_changes(filtered_df):
    """Return the APPLY_NOs hit by a major registration change AND in
    serious overdue status (current overdue days >= 15).

    Args:
        filtered_df: recent business-change records (output of
            load_business_change_data) with ALTITEM / APPLY_NO /
            CREDIT_CODE / 当前逾期天数 columns.

    Returns:
        list: APPLY_NO values. Bug fix: the success path previously
        returned a numpy array while the early-return paths returned a
        list; the return type is now consistently a list.
    """
    serious_overdue_apps = []
    
    # Nothing to do on empty input.
    if len(filtered_df) == 0:
        print("警告: 工商变更数据为空")
        return serious_overdue_apps
        
    try:
        # Change-category mapping maintained in an Excel sheet.
        df_s = pd.read_excel('DNNFraud/dataIn/韩博华工商变更重大变更数据时点24年12月31日/工商变更细分.xlsx')
        print(f"变更分类表行数: {df_s.shape[0]}")
    except Exception as e:
        print(f"读取变更分类Excel文件出错: {e}")
        return serious_overdue_apps
    
    try:
        # Work on a copy so the placeholder columns added below do not
        # mutate the caller's DataFrame (and avoid SettingWithCopy issues).
        filtered_df = filtered_df.copy()
        if 'ALTBE' not in filtered_df.columns:
            filtered_df['ALTBE'] = ''
        if 'ALTAF' not in filtered_df.columns:
            filtered_df['ALTAF'] = ''
        
        # Attach the change category to each change record.
        df = pd.merge(filtered_df, df_s, how='left', left_on='ALTITEM', right_on='变更事项')
        
        # Change categories treated as "major".
        major_changes = [
            '企业类型变更',
            '出资额变更',
            '投资人变更',
            '法定代表人变更',
            '高级管理人员变更'
        ]
        
        # Unmapped change items fall into the catch-all category.
        df['变更大类'] = df.apply(
            lambda x: '其他事项变更' if pd.notna(x['ALTITEM']) and pd.isna(x['变更大类']) else x['变更大类'],
            axis=1)
        
        # One row per change event, counted by the pivot below.
        df['result_tr'] = 1
        
        # APPLY_NO -> CREDIT_CODE mapping, re-attached after the pivot.
        credit_map = df[['APPLY_NO', 'CREDIT_CODE']].drop_duplicates()
        
        # Pivot: change counts per application and change category.
        df_p = pd.pivot_table(df, index=['APPLY_NO'], 
                            columns='变更大类', 
                            values='result_tr', 
                            aggfunc='sum').reset_index()
        
        # Re-attach CREDIT_CODE to the pivoted frame.
        df_p = pd.merge(df_p, credit_map, how='left', on='APPLY_NO')
        
        # Binarise the major-change columns (>=1 change -> 1), creating any
        # missing category column as all-zero.
        for category in major_changes:
            if category in df_p.columns:
                df_p[category] = (df_p[category].fillna(0) > 0).astype(int)
            else:
                df_p[category] = 0
        
        # Applications with at least one major change (vectorized version of
        # the original row-by-row scan; identical result).
        hit_mask = df_p[major_changes].max(axis=1) == 1
        hit_apply_nos = set(df_p.loc[hit_mask, 'APPLY_NO'])
        
        # Records belonging to applications with a major change.
        hit_records = filtered_df[filtered_df['APPLY_NO'].isin(hit_apply_nos)]
        
        # Seriously overdue: current overdue days >= 15.
        serious_overdue = hit_records[
            (hit_records['当前逾期天数'] >= 15 )
        ]
        serious_overdue_apps = serious_overdue['APPLY_NO'].unique().tolist()
        print(f"\n工商变更严重逾期申请件数: {len(serious_overdue_apps)}")
        
    except Exception as e:
        print(f"处理工商变更数据时出错: {e}")
        
    return serious_overdue_apps

def process_final_data():
    """Build the labelled modelling table (main pipeline step).

    Loads the application master table, assigns fraud labels (flag_all)
    from several sources (data lake, concentration points, tax fraud,
    lost contact, business-change overdue), merges the vendor anti-fraud
    score, keeps only rows with flag_all in {0, 1}, fills missing scores
    with the sentinel -999999 and writes the result to
    DNNFraud/dataOut/final_processed_data.csv.

    Returns:
        The final merged, labelled DataFrame.
    """
    # 1. Read app_data as the master table.
    try:
        app_data = pd.read_csv('DNNFraud/dataIn/杨承林宽表数据时点25年2月28日/APP_0303_DATA.csv', 
                              sep=',',
                              encoding='utf-8')
        
        print("\n=== APP_0303_DATA.csv数据信息 ===")
        print(f"原始记录数: {len(app_data)}")
        print(f"原始字段数: {len(app_data.columns)}")

        print("app_data记录数:",app_data.shape[0])
        # Keep only completed applications.
        app_data = app_data[app_data['APPLY_STS'] == 'COMP']
        print("app_data记录数:",app_data.shape[0])
        # Initialise flag_all to -1 (= unlabelled).
        app_data['flag_all'] = -1
        
    except Exception as e:
        print(f"\n错误: 读取APP_0303_DATA.csv时出错: {e}")
        raise
    
    # 2. Load the data-lake records (with manual fraud overrides applied).
    df_lake = process_lake_data()
    
    # Copy flag_all from the lake onto matching APPLY_NOs in the master.
    lake_flag_map = df_lake.set_index('app_num')['flag_all']
    app_data.loc[app_data['APPLY_NO'].isin(df_lake['app_num']), 'flag_all'] = \
        app_data[app_data['APPLY_NO'].isin(df_lake['app_num'])]['APPLY_NO'].map(lake_flag_map)
    
    # 3. Load the auxiliary labelling sources.
    # Concentration-point applications with non-zero bad debt.
    df_filtered = process_excel()
    
    # Deduplicated tax-fraud records.
    df_deduped = merge_and_deduplicate()
    
    # Lost-contact customer numbers.
    lost_customers = load_lost_contact_data()
    
    # Business-registration changes, then the seriously overdue subset.
    filtered_df = load_business_change_data()
    serious_overdue_apps = process_business_changes(filtered_df)
    
    # Keep only tax-fraud records with customer tier >= 3.
    df_deduped_high_risk = df_deduped[df_deduped['客户分层'] >= 3]
    
    # Report the tier filtering.
    print("\n客户分层筛选信息:")
    print(f"原始欺诈记录数: {len(df_deduped)}")
    print(f"客户分层>=3的记录数: {len(df_deduped_high_risk)}")
    print(f"失联客户数量: {len(lost_customers)}")
    
    # 4. Label app_data.
    # One boolean mask per labelling source.
    mask_filtered = app_data['APPLY_NO'].isin(df_filtered['申请书编号'])
    mask_high_risk = app_data['APPLY_NO'].isin(df_deduped_high_risk['APP_NUM'])
    mask_lost = app_data['BP_CUST_NO'].isin(lost_customers)  # BP_CUST_NO corresponds to host_cust_id
    mask_business_overdue = app_data['APPLY_NO'].isin(serious_overdue_apps)
    
    print("\n各条件匹配统计:")
    print(f"匹配集中点位记录数: {mask_filtered.sum()}")
    print(f"匹配税务欺诈记录数: {mask_high_risk.sum()}")
    print(f"匹配失联客户记录数: {mask_lost.sum()}")
    print(f"匹配工商变更严重逾期记录数: {mask_business_overdue.sum()}")
    
    # A hit on any source marks the application as bad (flag_all = 1).
    mask = mask_filtered | mask_high_risk | mask_lost | mask_business_overdue
    app_data.loc[mask, 'flag_all'] = 1
    
    # 5. Merge the vendor (建信金科) anti-fraud score file.
    df_score = pd.read_csv('DNNFraud/dataIn/建信金科诚信度评分SCORE_VAR_RST.csv')
    
    # The score file is expected to be one row per APPLY_NO; enforce it.
    apply_no_counts = df_score.groupby('APPLY_NO').size()
    if apply_no_counts.max() > 1:
        print("\n警告：建信金科诚信度评分数据中存在重复的APPLY_NO")
        print("正在删除重复记录...")
        df_score = df_score.drop_duplicates(subset=['APPLY_NO'], keep='first')
    
    # Drop APPLY_STS from the score file to avoid a duplicated column.
    if 'APPLY_STS' in df_score.columns:
        df_score = df_score.drop('APPLY_STS', axis=1)
    
    # Inner join: keep only applications that have a score row.
    final_df = pd.merge(app_data, df_score, on='APPLY_NO', how='inner')
    
    # Drop rows whose flag_all is neither 0 nor 1 (still unlabelled, -1).
    original_len = len(final_df)
    final_df = final_df[final_df['flag_all'].isin([0, 1])]
    removed_len = original_len - len(final_df)
    
    print("\nflag_all数据清洗:")
    print(f"原始记录数: {original_len}")
    print(f"删除flag_all非0/1的记录数: {removed_len}")
    print(f"保留记录数: {len(final_df)}")
    
    # Fill missing SCORE_ANTI_FRAUD values with the sentinel -999999.
    missing_count = final_df['SCORE_ANTI_FRAUD'].isna().sum()
    if missing_count > 0:
        print(f"\n填充SCORE_ANTI_FRAUD缺失值:")
        print(f"缺失值数量: {missing_count}")
        final_df['SCORE_ANTI_FRAUD'] = final_df['SCORE_ANTI_FRAUD'].fillna(-999999)
        print("已用-999999填充缺失值")
    
    # Summary of the final table.
    print("\n最终数据处理结果:")
    print(f"原始app_data记录数: {len(app_data)}")
    print(f"flag_all=1的记录数: {final_df['flag_all'].sum()}")
    print(f"最终合并后的记录数: {len(final_df)}")
    
    # flag_all distribution.
    flag_counts = final_df['flag_all'].value_counts(dropna=True).sort_index()
    print("\nflag_all分组统计:")
    for flag_value, count in flag_counts.items():
        print(f"flag_all = {flag_value}: {count}条记录")
    
    # Cross-tab of flag_all against application status.
    cross_tab = pd.crosstab(final_df['flag_all'], final_df['APPLY_STS'], margins=True)
    print("\nflag_all和APPLY_STS交叉统计:")
    print(cross_tab)
    
    # Row-wise percentages of the cross-tab.
    print("\n各组占比:")
    percentages = cross_tab.div(cross_tab['All'], axis=0) * 100
    print(percentages.round(2))
    
    # Persist the result.
    final_df.to_csv('DNNFraud/dataOut/final_processed_data.csv', index=False)
    print("\n最终处理后的数据已保存到 dataOut/final_processed_data.csv")
    
    return final_df

def union_with_sample(final_df):
    """Union *final_df* with the historical sample data.

    The sample file's columns are renamed to final_df's naming via
    ``model_pro_dict``; only columns present in both frames (plus the
    mapped model columns) are kept. A new ``modPro`` column tags each
    row's origin (1 = final_df, 0 = sample). The union is written to
    DNNFraud/dataOut/merged_with_sample.csv.

    Returns:
        The concatenated DataFrame.
    """
    # Load the sample data.
    sample_df = pd.read_csv('DNNFraud/SAMPLE_S4_MODEL.csv')
    
    # final_df column name -> sample column name (duplicate mappings removed).
    model_pro_dict = {
        'APPLY_DATE': 'APP_DT',
        'perBrsM6ldMaxlnteday': 'br_var11',
        'perBrsM12ldCaonAllnum': 'br_var81',
        'perBrsM1IdBankNightOrgnum': 'br_var102',
        'perPbcPctLoan': 'gPctLoan',
        'perPbcNumNBLon': 'gNumNBLon',
        'perPbcNetFractionRvl': 'gNetFractionRvl',
        'perPbcPctTLM1Last12Mons': 'gPctTLM1Last12Mons',
        'perPbcTimesDelqLast12Mons': 'gTimesDelqLast12Mons',
        'perPbcNumlnqrsNBPPLast12Mons': 'gNuminqrsNBPPLast12Mons',
        'perPbcLoanPrdAmt': 'PL_AMT',
        'perPbcUtil75CC6M': 'P_UTL75_CC_6M',
        'perPbcCheckOrgCnt12M': 'CHECK_ORG_CNT_12M'
    }
    
    # NOTE(review): this rename maps APPLY_NO to itself and is a no-op —
    # presumably left over from an earlier column name; confirm and remove.
    sample_df = sample_df.rename(columns={'APPLY_NO': 'APPLY_NO'})  # keep APPLY_NO unchanged
    
    # Columns shared by both frames, plus the mapped model columns.
    common_cols = [col for col in final_df.columns if col in sample_df.columns]
    mapping_cols_final = [col for col in model_pro_dict.keys() if col in final_df.columns]
    
    # Rename the sample columns to final_df's naming (inverted mapping).
    sample_df_renamed = sample_df.copy()
    rename_dict = {v: k for k, v in model_pro_dict.items()}
    sample_df_renamed = sample_df_renamed.rename(columns=rename_dict)
    
    # Keep only the selected columns in both frames.
    final_df_selected = final_df[common_cols + mapping_cols_final].copy()
    sample_df_selected = sample_df_renamed[common_cols + mapping_cols_final].copy()
    
    # Tag each row's origin.
    final_df_selected.loc[:, 'modPro'] = 1
    sample_df_selected.loc[:, 'modPro'] = 0
    
    # Union the two frames.
    merged_df = pd.concat([final_df_selected, sample_df_selected], axis=0)
    
    # Report duplicate APPLY_NOs across the union.
    print("\nAPPLY_NO重复记录检查:")
    try:
        duplicates = merged_df.groupby('APPLY_NO', as_index=False).size()
        duplicate_counts = duplicates[duplicates['size'] > 1]
        
        if len(duplicate_counts) > 0:
            print(f"发现{len(duplicate_counts)}个重复的APPLY_NO")
            print("\n重复记录的详细信息:")
            print("重复次数统计:")
            print(duplicate_counts['size'].value_counts().sort_index())
            
            # Show the modPro origin of each duplicated APPLY_NO.
            print("\n重复记录的modPro分布:")
            for apply_no in duplicate_counts['APPLY_NO']:
                dup_records = merged_df[merged_df['APPLY_NO'] == apply_no]
                print(f"\nAPPLY_NO: {apply_no}")
                print("modPro分布:")
                print(dup_records['modPro'].value_counts())
        else:
            print("没有发现重复的APPLY_NO")
    except Exception as e:
        print(f"检查重复记录时出错: {str(e)}")
        print("\nAPPLY_NO列的前几个值:")
        print(merged_df['APPLY_NO'].head())
    
    # Report the union statistics.
    print("\n数据合并统计:")
    print(f"final_df记录数: {len(final_df)}")
    print(f"sample_df记录数: {len(sample_df)}")
    print(f"合并后总记录数: {len(merged_df)}")
    print(f"\n保留的字段数: {len(common_cols + mapping_cols_final)}")
    print("\nmodPro分布:")
    print(merged_df['modPro'].value_counts())
    
    # Cross-tab of flag_all against application status.
    cross_tab = pd.crosstab(merged_df['flag_all'], merged_df['APPLY_STS'], margins=True)
    print("\nflag_all和APPLY_STS交叉统计:")
    print(cross_tab)
    
    # Row-wise percentages of the cross-tab.
    print("\n各组占比:")
    percentages = cross_tab.div(cross_tab['All'], axis=0) * 100
    print(percentages.round(2))
    
    # Persist the union.
    merged_df.to_csv('DNNFraud/dataOut/merged_with_sample.csv', index=False)
    print("\n合并后的数据已保存到 dataOut/merged_with_sample.csv")
    
    return merged_df

def print_stats(df, name):
    """Print record count, bad-customer count and bad rate for *df*.

    Args:
        df: DataFrame with a flag_all column (1 = bad).
        name: label used in the printed header.
    """
    total_count = len(df)
    bad_count = int((df['flag_all'] == 1).sum())
    # Bug fix: guard against an empty frame — the original divided by
    # len(df) unconditionally, producing NaN (or ZeroDivisionError) here.
    bad_ratio = bad_count / total_count * 100 if total_count else 0.0
    print(f"\n{name}统计:")
    print(f"总记录数: {total_count:,}")
    print(f"坏客户数: {bad_count:,}")
    print(f"坏客户比例: {bad_ratio:.2f}%")

def split_data(final_df):
    """Split *final_df* into train/validation sets by application date.

    The cut point is the APP_DT of the bad case sitting at 80% of the
    time-ordered bad cases, so roughly 80% of bad customers land in the
    training window.

    Returns:
        Tuple ``(train_df, val_df)``.
    """
    print("\n=== 开始数据拆分和合并 ===")
    print_stats(final_df, "原始final_df")

    # Order everything by application date.
    ordered = final_df.sort_values('APP_DT')

    # Locate the 80%-of-bad-cases cut date.
    bad_cases = ordered[ordered['flag_all'] == 1]
    cut_position = int(len(bad_cases) * 0.8)
    split_date = bad_cases.iloc[cut_position]['APP_DT']
    print(f"\n分割时间点: {split_date}")

    # Label distribution past the cut point, for inspection.
    print(ordered[ordered['APP_DT'] > split_date]['flag_all'].value_counts())

    # Time-based split around the cut date.
    train_df = ordered[ordered['APP_DT'] <= split_date]
    val_df = ordered[ordered['APP_DT'] > split_date]

    # Post-split statistics.
    print("\n=== 时间分割后数据统计 ===")
    print_stats(train_df, "训练集")
    print_stats(val_df, "验证集")

    return train_df, val_df

def split_and_merge_data2(final_df,merged_df):
    """Split *final_df* by time (60% of bad cases in train), then extend
    the training set with the modPro=0 sample rows from *merged_df*.

    Writes final_train_data.csv and final_validation_data.csv under
    DNNFraud/dataOut.

    Returns:
        Tuple ``(final_train_df, val_df)``.
    """
    print("\n=== 开始数据拆分和合并 ===")
    print_stats(final_df, "原始final_df")
    
    # Order everything by application date.
    final_df_sorted = final_df.sort_values('APP_DT')
    
    # Cut point: APP_DT of the bad case at 60% of the time-ordered bad cases.
    bad_cases = final_df_sorted[final_df_sorted['flag_all'] == 1]
    split_index = int(len(bad_cases) * 0.6)
    
    # The split date itself.
    split_date = bad_cases.iloc[split_index]['APP_DT']
    print(f"\n分割时间点: {split_date}")
    
    # Time-based split around the cut date.
    train_df = final_df_sorted[final_df_sorted['APP_DT'] <= split_date]
    val_df = final_df_sorted[final_df_sorted['APP_DT'] > split_date]
    
    # Post-split statistics.
    print("\n=== 时间分割后数据统计 ===")
    print_stats(train_df, "训练集")
    print_stats(val_df, "验证集")
    
    # Historical sample rows (modPro == 0) are added to the training set.
    sample_data = merged_df[merged_df['modPro'] == 0].copy()
    print_stats(sample_data, "样本数据(modPro=0)")
    
    # Final training set = time-split train + historical sample.
    final_train_df = pd.concat([train_df, sample_data])
    print("\n=== 最终数据集统计 ===")
    print_stats(final_train_df, "合并后的训练集")
    print_stats(val_df, "最终验证集")
    
    # Persist both splits.
    final_train_df.to_csv('DNNFraud/dataOut/final_train_data.csv', index=False)
    val_df.to_csv('DNNFraud/dataOut/final_validation_data.csv', index=False)
    print("\n最终数据集已保存到 dataOut/final_train_data.csv 和 final_validation_data.csv")
    
    return final_train_df, val_df

def evaluate_score_var_rst(val_df):
    """Evaluate the vendor anti-fraud score on the validation set.

    Args:
        val_df: validation DataFrame with APPLY_NO and flag_all columns.

    Returns:
        Tuple ``(intersection_df, ks, auc)`` where intersection_df is the
        inner join of val_df with the vendor score file on APPLY_NO.
    """
    # Load the vendor score file.
    score_df = pd.read_csv('DNNFraud/dataIn/建信金科诚信度评分SCORE_VAR_RST.csv')
    
    # Inner join: evaluate only validation rows that have a score.
    intersection_df = pd.merge(
        val_df[['APPLY_NO', 'flag_all']], 
        score_df[['APPLY_NO', 'SCORE_ANTI_FRAUD']], 
        on='APPLY_NO',
        how='inner'
    )
    
    print("\n=== 交集数据集统计 ===")
    print(f"交集记录数: {len(intersection_df):,}")
    
    # Bug fix: `df['col'].fillna(..., inplace=True)` is chained assignment —
    # deprecated in pandas 2.x and a silent no-op under copy-on-write.
    # Assign the filled column back explicitly instead.
    intersection_df['SCORE_ANTI_FRAUD'] = intersection_df['SCORE_ANTI_FRAUD'].fillna(-999999)
    
    # AUC and KS of the score against the label.
    auc = roc_auc_score(intersection_df['flag_all'], intersection_df['SCORE_ANTI_FRAUD'])
    fpr, tpr, thresholds = roc_curve(intersection_df['flag_all'], intersection_df['SCORE_ANTI_FRAUD'])
    ks = max(abs(tpr - fpr))
    
    # Threshold maximising TPR - FPR (the KS point).
    best_threshold = thresholds[np.argmax(abs(tpr - fpr))]
    
    # Hard predictions at the KS-optimal threshold.
    y_pred = (intersection_df['SCORE_ANTI_FRAUD'] >= best_threshold).astype(int)
    
    # Classification metrics at that threshold.
    precision = precision_score(intersection_df['flag_all'], y_pred)
    recall = recall_score(intersection_df['flag_all'], y_pred)
    f1 = f1_score(intersection_df['flag_all'], y_pred)
    
    print("\n建信金科评分在验证集交集上的表现:")
    print(f"AUC值: {auc:.4f}")
    print(f"KS值: {ks:.4f}")
    print(f"查准率 (Precision): {precision:.4f}")
    print(f"查全率 (Recall): {recall:.4f}")
    print(f"F1 Score: {f1:.4f}")
    
    return intersection_df, ks, auc

def calculate_psi(train_df, val_df):
    """Return the absolute difference in bad-sample rates between the sets.

    NOTE: despite its name this is NOT a true Population Stability Index —
    it computes ``|bad_rate(train) - bad_rate(val)|``. For a binned PSI
    over a continuous column, see calculate_psi2. The name and behavior
    are kept unchanged for existing callers.

    Args:
        train_df / val_df: DataFrames with a flag_all column (1 = bad).

    Returns:
        float: absolute bad-rate difference in [0, 1].
    """
    train_bad_ratio = (train_df['flag_all'] == 1).mean()
    val_bad_ratio = (val_df['flag_all'] == 1).mean()
    return abs(train_bad_ratio - val_bad_ratio)

def calculate_psi2(train_df, val_df, col):
    """Population Stability Index of *col* between train and validation.

    Bins are the deciles of the training distribution (open-ended at both
    extremes) and PSI = sum((val% - train%) * ln(val% / train%)).

    Args:
        train_df / val_df: DataFrames containing *col*.
        col: name of the numeric column to compare.

    Returns:
        float: PSI value (0 means identical binned distributions).
    """
    # The redundant function-local `import numpy as np` was removed —
    # numpy is already imported at module level.

    # 1. Equal-frequency bin edges from the training data (10 bins),
    #    opened at both ends so out-of-range values are still counted.
    bins = np.percentile(train_df[col], np.linspace(0, 100, 11))
    bins[0] = float('-inf')
    bins[-1] = float('inf')
    
    # 2. Per-bin population shares for each set.
    train_counts, _ = np.histogram(train_df[col], bins=bins)
    val_counts, _ = np.histogram(val_df[col], bins=bins)
    
    train_pct = train_counts / len(train_df)
    val_pct = val_counts / len(val_df)
    
    # 3&4. Clamp zero shares so the log and the ratio stay finite.
    eps = 1e-10
    train_pct = np.maximum(train_pct, eps)
    val_pct = np.maximum(val_pct, eps)
    
    psi = np.sum((val_pct - train_pct) * np.log(val_pct / train_pct))
    
    return psi

def undersample_by_date(df, target_ratio=20):
    """Undersample negatives day by day so the overall negative:positive
    ratio is at most *target_ratio*:1, keeping every positive sample.

    Args:
        df: input frame; must contain host_cust_id and flag_all columns and
            a date-like column (any column whose name contains 'DT' or
            'DATE' — the first such column is used).
        target_ratio: target negatives per positive (default 20:1).

    Returns:
        The undersampled frame, sorted by the chosen date column.

    Raises:
        ValueError: if no date-like column is found.
    """
    # Pick the first column that looks like a date.
    date_cols = [col for col in df.columns if 'DT' in col or 'DATE' in col]
    if not date_cols:
        raise ValueError("未找到日期列，请确保数据中包含日期信息")
    date_col = date_cols[0]
    print(f"使用日期列: {date_col}")
    
    # Keep only each customer's most recent record.
    df = df.sort_values(date_col).groupby('host_cust_id').last().reset_index()
    
    # Split positives and negatives; positives are never dropped.
    pos_samples = df[df['flag_all'] == 1]
    neg_samples = df[df['flag_all'] == 0]
    
    # Overall negative-sample budget.
    total_pos = len(pos_samples)
    total_neg_target = total_pos * target_ratio
    
    # Positive counts per day drive each day's negative quota.
    daily_pos_counts = pos_samples.groupby(date_col).size()
    
    sampled_neg_dfs = []
    remaining_neg_target = total_neg_target
    
    # Round 1: sample negatives day by day, capped by the remaining budget.
    for date in daily_pos_counts.index:
        day_neg = neg_samples[neg_samples[date_col] == date]
        n_pos = daily_pos_counts[date]
        n_neg_target = min(n_pos * target_ratio, remaining_neg_target)
        
        if len(day_neg) > n_neg_target:
            day_neg = day_neg.sample(n=int(n_neg_target), random_state=42)
        
        sampled_neg_dfs.append(day_neg)
        remaining_neg_target -= len(day_neg)
    
    # Round 2: top up from the unused negatives if the budget is not met.
    if remaining_neg_target > 0:
        used_neg_indices = pd.concat(sampled_neg_dfs).index
        remaining_neg = neg_samples.loc[~neg_samples.index.isin(used_neg_indices)]
        
        if len(remaining_neg) > 0:
            additional_samples = remaining_neg.sample(
                n=min(int(remaining_neg_target), len(remaining_neg)),
                random_state=42
            )
            sampled_neg_dfs.append(additional_samples)
    
    # Combine all sampled negatives.
    sampled_neg = pd.concat(sampled_neg_dfs)
    
    # Re-attach every positive sample.
    result_df = pd.concat([pos_samples, sampled_neg])
    
    # Report the sampling outcome. NOTE(review): the ratio prints below
    # divide by len(pos_samples) and would raise if there are no positives
    # — confirm upstream always supplies at least one flag_all==1 row.
    print(f"\n=== 欠采样结果 ===")
    print(f"采样前总样本数: {len(df)}")
    print(f"采样后总样本数: {len(result_df)}")
    print(f"采样前正样本数: {len(pos_samples)}")
    print(f"采样后正样本数: {len(result_df[result_df['flag_all'] == 1])}")
    print(f"采样前负正样本比: {len(neg_samples) / len(pos_samples):.2f}")
    print(f"采样后负正样本比: {len(sampled_neg) / len(pos_samples):.2f}")
    
    return result_df.sort_values(date_col)

def check_duplicates(df, name):
    """Report duplicated host_cust_id values in *df* (prints only, returns
    None)."""
    print(f"\n=== {name}客户号重复情况 ===")

    # Customers appearing more than once.
    occurrences = df['host_cust_id'].value_counts()
    repeated = occurrences[occurrences > 1]

    # Guard clause: nothing duplicated, report and leave.
    if len(repeated) == 0:
        print("没有重复的客户号")
        return

    print(f"存在重复客户号数量: {len(repeated)}")
    print(f"重复客户号的分布情况:")
    print(repeated.value_counts().sort_index())
    print("\n出现次数最多的前5个客户号:")
    print(repeated.head())

    # Inspect label and date spread for the most frequent duplicates.
    for cust_id in repeated.head().index:
        dup_records = df[df['host_cust_id'] == cust_id]
        print(f"\n客户号 {cust_id} 的记录:")
        print(f"总记录数: {len(dup_records)}")
        print(f"标签分布:\n{dup_records['flag_all'].value_counts()}")
        print(f"申请日期分布:\n{dup_records['APP_DT'].value_counts().sort_index()}")

if __name__ == "__main__":

    # Show the working directory (all data paths below are relative).
    print(os.getcwd())
    final_df = process_final_data()
    
    # Report missing vendor scores.
    print("\nSCORE_ANTI_FRAUD缺失值统计:")
    print(final_df['SCORE_ANTI_FRAUD'].isna().sum(), "条记录存在缺失")
    
    
    # KS / AUC of SCORE_ANTI_FRAUD on the full final_df.
    ks, auc = calculate_ks_auc(final_df, 'SCORE_ANTI_FRAUD', 'flag_all')
    print("\n模型评估指标:")
    print(f"KS值: {ks:.4f}")
    print(f"AUC值: {auc:.4f}")
        
    # Union with the historical sample (currently disabled).
    # merged_df = union_with_sample(final_df)
    
    # Time-based train/validation split (80% of bad cases in train).
    train_df, val_df = split_data(final_df)
    
    # Daily undersampling: keep all flag_all=1 rows, downsample flag_all=0
    # per day. NOTE(review): an earlier comment said 10:1, but
    # undersample_by_date's default target_ratio is 20 — confirm intent.
    train_df = undersample_by_date(train_df)
    val_df = undersample_by_date(val_df)
    
    # Duplicate-customer checks on both splits.
    check_duplicates(train_df, "训练集")
    check_duplicates(val_df, "验证集")
    
    # Vendor-score performance on the validation set.
    intersection_df, ks, auc = evaluate_score_var_rst(val_df)

    # Train/validation distribution difference (bad-rate gap; see
    # calculate_psi's docstring — not a true PSI).
    psi = calculate_psi(train_df, val_df)
    print(f"PSI值: {psi:.4f}")
    # Binned PSI for every column starting with 'per'.
    per_cols = [col for col in train_df.columns if col.startswith('per')]
    for col in per_cols:
        psi = calculate_psi2(train_df, val_df, col)
        print(f"{col}的PSI值: {psi:.4f}")

    



    


    

