import pandas as pd
import os
from sklearn.metrics import roc_auc_score, roc_curve

def load_data(folder_dir, sep):
    """Read every .csv file in *folder_dir* and stack them into one DataFrame.

    A multi-character separator is regex-escaped character by character so
    pandas treats it as a literal delimiter pattern.

    Args:
        folder_dir: directory containing the .csv files.
        sep: field separator (single character or multi-character literal).

    Returns:
        One DataFrame with all files concatenated row-wise.
    """
    if len(sep) > 1:
        sep = ''.join('\\' + ch for ch in sep)
    csv_names = [name for name in os.listdir(folder_dir) if name.endswith(".csv")]
    frames = [
        pd.read_csv(os.path.join(folder_dir, name), sep=sep, encoding='utf8')
        for name in csv_names
    ]
    return pd.concat(frames, axis=0)

def merge_and_deduplicate():
    """Merge the two tax-fraud extracts, drop duplicate rows and save the result.

    Reads the two tab-separated source files, stacks them, removes exact
    duplicate rows, writes the result to dataOut and returns it.
    """
    # Load both tab-separated source files.
    df1 = pd.read_csv('DNNFraud/dataIn/Q4税务欺诈.txt', sep='\t')
    df2 = pd.read_csv('DNNFraud/dataIn/税务欺诈1128.txt', sep='\t')
    
    # Record counts before merging.
    print(f"Q4税务欺诈.txt 记录数: {len(df1)}")
    print(f"税务欺诈1128.txt 记录数: {len(df2)}")
    print(f"处理前总记录数: {len(df1) + len(df2)}")
    
    # Stack the two frames and drop exact duplicate rows.
    df_merged = pd.concat([df1, df2], axis=0)
    df_deduped = df_merged.drop_duplicates()
    
    # Record counts after deduplication.
    print(f"\n去重后记录数: {len(df_deduped)}")
    print(f"删除的重复记录数: {len(df_merged) - len(df_deduped)}")
    
    # Persist the result. BUGFIX: the log message previously named
    # 'merged_fraud_data.txt' while the file is actually written to
    # 'merged_TaxFraud_data.txt'; the message now matches the real path.
    df_deduped.to_csv('DNNFraud/dataOut/merged_TaxFraud_data.txt', sep='\t', index=False)
    print("\n合并后的数据已保存到 dataOut/merged_TaxFraud_data.txt")
    
    return df_deduped

def process_excel():
    """Load the concentration-application sheet and keep rows whose bad-debt
    amount ('不良额') is non-zero; save and return the filtered frame."""
    source = pd.read_excel('DNNFraud/dataIn/王恣懿集中申请分析2025年1月向下取整.xlsx',
                           sheet_name='生效的集中点位申请明细')

    # Keep only records with a non-zero bad-debt amount.
    nonzero = source[source['不良额'] != 0]

    # Report record counts before and after filtering.
    print(f"\n原始记录数: {len(source)}")
    print(f"不良额非0的记录数: {len(nonzero)}")

    # Persist the filtered records.
    nonzero.to_excel('DNNFraud/dataOut/filtered_bad_debt.xlsx', index=False)
    print("\n已将不良额非0的记录保存到 dataOut/filtered_bad_debt.xlsx")

    return nonzero

def process_lake_data():
    """Load the data-lake extract for 2025-02-09 and print basic diagnostics.

    Returns the concatenated DataFrame produced by load_data.
    """
    # '\x7f' followed by '^' is the column delimiter used by the lake export.
    delimiter = '\x7f\x5e'
    lake_df = load_data('DNNFraud/dataIn/2025-02-09', delimiter)

    # Print shape, column names and the flag_all distribution.
    print("\n数据湖数据信息:")
    print(f"记录数: {len(lake_df)}")
    print(f"列数: {len(lake_df.columns)}")
    print("\n列名:")
    print(lake_df.columns.tolist())
    print(lake_df['flag_all'].value_counts(dropna=True).sort_index())

    return lake_df

def calculate_ks_auc(df, score_col, label_col):
    """Compute the KS statistic and AUC of a score column against a binary label.

    Args:
        df: DataFrame holding both columns (missing scores are assumed to be
            filled already by the caller).
        score_col: name of the model-score column.
        label_col: name of the 0/1 label column.

    Returns:
        Tuple (ks, auc).
    """
    labels = df[label_col]
    scores = df[score_col]

    auc = roc_auc_score(labels, scores)

    # KS is the largest vertical gap between the TPR and FPR curves.
    fpr, tpr, _ = roc_curve(labels, scores)
    ks = max(abs(tpr - fpr))

    return ks, auc

def process_final_data():
    """Build the final modelling dataset.

    Combines the data-lake extract, the filtered Excel records and the
    deduplicated tax-fraud list; flags matching applications as bad
    (flag_all = 1); inner-joins the credibility-score file; cleans the flag
    column; fills missing scores with a sentinel; saves and returns the
    merged DataFrame.
    """
    # Load all three upstream data sources.
    df_lake = process_lake_data()
    df_filtered = process_excel()
    df_deduped = merge_and_deduplicate()
    
    # Keep only fraud records with customer tier ('客户分层') >= 3.
    df_deduped_high_risk = df_deduped[df_deduped['客户分层'] >= 3]
    
    # Report the effect of the customer-tier filter.
    print("\n客户分层筛选信息:")
    print(f"原始欺诈记录数: {len(df_deduped)}")
    print(f"客户分层>=3的记录数: {len(df_deduped_high_risk)}")
    
    # Count how many lake records match each bad-flag condition.
    mask_filtered = df_lake['app_num'].isin(df_filtered['申请书编号'])
    mask_high_risk = df_lake['app_num'].isin(df_deduped_high_risk['APP_NUM'])
    
    print("\n各条件匹配统计:")
    print(f"匹配集中点位记录数: {mask_filtered.sum()}")
    print(f"匹配税务欺诈记录数: {mask_high_risk.sum()}")
    print(f"同时满足两个条件的记录数: {(mask_filtered & mask_high_risk).sum()}")
    
    # A record matching either condition is labelled bad (flag_all = 1).
    mask = mask_filtered | mask_high_risk
    df_lake.loc[mask, 'flag_all'] = 1
    
    # Show the flag distribution before joining the score file.
    print("\n合并前df_lake的flag_all统计:")
    print(df_lake['flag_all'].value_counts(dropna=True).sort_index())
    
    
    # Load the credibility-score file (建信金科诚信度评分).
    df_score = pd.read_csv('DNNFraud/dataIn/建信金科诚信度评分SCORE_VAR_RST.csv')
    
    # Enforce a one-to-one relationship on APPLY_NO before merging.
    apply_no_counts = df_score.groupby('APPLY_NO').size()
    if apply_no_counts.max() > 1:
        print("\n警告：建信金科诚信度评分数据中存在重复的APPLY_NO")
        print("正在删除重复记录...")
        df_score = df_score.drop_duplicates(subset=['APPLY_NO'], keep='first')
    
    # Drop APPLY_STS from the score file so the merge does not duplicate it.
    if 'APPLY_STS' in df_score.columns:
        df_score = df_score.drop('APPLY_STS', axis=1)
    
    # Inner-join lake data with scores on the application number.
    final_df = pd.merge(df_lake, df_score, left_on='app_num', right_on='APPLY_NO', how='inner')
    
    # Keep only records whose flag_all is exactly 0 or 1.
    original_len = len(final_df)
    final_df = final_df[final_df['flag_all'].isin([0, 1])]
    removed_len = original_len - len(final_df)
    
    print("\nflag_all数据清洗:")
    print(f"原始记录数: {original_len}")
    print(f"删除flag_all非0/1的记录数: {removed_len}")
    print(f"保留记录数: {len(final_df)}")
    
    # Fill missing SCORE_ANTI_FRAUD values with the sentinel -999999.
    missing_count = final_df['SCORE_ANTI_FRAUD'].isna().sum()
    if missing_count > 0:
        print(f"\n填充SCORE_ANTI_FRAUD缺失值:")
        print(f"缺失值数量: {missing_count}")
        final_df['SCORE_ANTI_FRAUD'] = final_df['SCORE_ANTI_FRAUD'].fillna(-999999)
        print("已用-999999填充缺失值")
    
    # Summarise the final dataset.
    print("\n最终数据处理结果:")
    print(f"原始数据湖记录数: {len(df_lake)}")
    print(f"flag_all=1的记录数: {df_lake['flag_all'].sum()}")
    print(f"最终合并后的记录数: {len(final_df)}")
    
    # Per-flag record counts.
    flag_counts = final_df['flag_all'].value_counts(dropna=True).sort_index()
    print("\nflag_all分组统计:")
    for flag_value, count in flag_counts.items():
        print(f"flag_all = {flag_value}: {count}条记录")
    
    # Cross-tabulate flag_all against APPLY_STS (with row/column totals).
    cross_tab = pd.crosstab(final_df['flag_all'], final_df['APPLY_STS'], margins=True)
    print("\nflag_all和APPLY_STS交叉统计:")
    print(cross_tab)
    
    # Row-wise percentages of the cross-tab (each row divided by its 'All').
    print("\n各组占比:")
    percentages = cross_tab.div(cross_tab['All'], axis=0) * 100
    print(percentages.round(2))
    
    # Persist the final dataset.
    final_df.to_csv('DNNFraud/dataOut/final_processed_data.csv', index=False)
    print("\n最终处理后的数据已保存到 dataOut/final_processed_data.csv")
    
    return final_df

def union_with_sample(final_df):
    """Union final_df with the benchmark sample population.

    The sample file uses different column names; model_pro_dict maps
    final_df names (keys) to sample names (values). Only columns shared by
    both sources are kept, each row is tagged with modPro (1 = final_df,
    0 = sample), duplicate app_num values are reported, and the combined
    frame is saved and returned.
    """
    # Load the sample population.
    sample_df = pd.read_csv('DNNFraud/SAMPLE_S4_MODEL.csv')
    
    # Column mapping: final_df name -> sample name (duplicate mapping removed).
    model_pro_dict = {
        # 'app_num': 'APPLY_NO' is intentionally omitted — both refer to the
        # same field, and the sample column is renamed directly below.
        'APPLY_DATE': 'APP_DT',
        'perBrsM6ldMaxlnteday': 'br_var11',
        'perBrsM12ldCaonAllnum': 'br_var81',
        'perBrsM1IdBankNightOrgnum': 'br_var102',
        'perPbcPctLoan': 'gPctLoan',
        'perPbcNumNBLon': 'gNumNBLon',
        'perPbcNetFractionRvl': 'gNetFractionRvl',
        'perPbcPctTLM1Last12Mons': 'gPctTLM1Last12Mons',
        'perPbcTimesDelqLast12Mons': 'gTimesDelqLast12Mons',
        'perPbcNumlnqrsNBPPLast12Mons': 'gNuminqrsNBPPLast12Mons',
        'perPbcLoanPrdAmt': 'PL_AMT',
        'perPbcUtil75CC6M': 'P_UTL75_CC_6M',
        'perPbcCheckOrgCnt12M': 'CHECK_ORG_CNT_12M'
    }
    
    # Align the join key before any other renaming.
    sample_df = sample_df.rename(columns={'APPLY_NO': 'app_num'})
    
    # Columns present in both frames, plus mapped columns present in final_df.
    common_cols = [col for col in final_df.columns if col in sample_df.columns]
    mapping_cols_final = [col for col in model_pro_dict.keys() if col in final_df.columns]
    
    # Rename the sample columns (value -> key) so they match final_df.
    sample_df_renamed = sample_df.copy()
    rename_dict = {v: k for k, v in model_pro_dict.items()}
    sample_df_renamed = sample_df_renamed.rename(columns=rename_dict)
    
    # Restrict both sources to the shared column set.
    final_df_selected = final_df[common_cols + mapping_cols_final].copy()  # copy() avoids SettingWithCopyWarning
    sample_df_selected = sample_df_renamed[common_cols + mapping_cols_final].copy()
    
    # Tag the origin of each row.
    final_df_selected.loc[:, 'modPro'] = 1
    sample_df_selected.loc[:, 'modPro'] = 0
    
    # Stack the two sources.
    merged_df = pd.concat([final_df_selected, sample_df_selected], axis=0)
    
    # Report duplicate application numbers across the two sources.
    print("\nAPPLY_NO重复记录检查:")
    try:
        duplicates = merged_df.groupby('app_num', as_index=False).size()
        duplicate_counts = duplicates[duplicates['size'] > 1]
        
        if len(duplicate_counts) > 0:
            print(f"发现{len(duplicate_counts)}个重复的app_num")
            print("\n重复记录的详细信息:")
            print("重复次数统计:")
            print(duplicate_counts['size'].value_counts().sort_index())
            
            # Show which source (modPro) each duplicate comes from.
            print("\n重复记录的modPro分布:")
            for apply_no in duplicate_counts['app_num']:
                dup_records = merged_df[merged_df['app_num'] == apply_no]
                print(f"\napp_num: {apply_no}")
                print("modPro分布:")
                print(dup_records['modPro'].value_counts())
        else:
            print("没有发现重复的app_num")
    except Exception as e:
        print(f"检查重复记录时出错: {str(e)}")
        print("\napp_num列的前几个值:")
        print(merged_df['app_num'].head())
    
    # Merge summary.
    print("\n数据合并统计:")
    print(f"final_df记录数: {len(final_df)}")
    print(f"sample_df记录数: {len(sample_df)}")
    print(f"合并后总记录数: {len(merged_df)}")
    print(f"\n保留的字段数: {len(common_cols + mapping_cols_final)}")
    print("\nmodPro分布:")
    print(merged_df['modPro'].value_counts())
    
    # Cross-tabulate flag_all against APPLY_STS (with totals).
    cross_tab = pd.crosstab(merged_df['flag_all'], merged_df['APPLY_STS'], margins=True)
    print("\nflag_all和APPLY_STS交叉统计:")
    print(cross_tab)
    
    # Row-wise percentages of the cross-tab.
    print("\n各组占比:")
    percentages = cross_tab.div(cross_tab['All'], axis=0) * 100
    print(percentages.round(2))
    
    # Persist the combined frame.
    merged_df.to_csv('DNNFraud/dataOut/merged_with_sample.csv', index=False)
    print("\n合并后的数据已保存到 dataOut/merged_with_sample.csv")

    
    return merged_df

def print_stats(df, name):
    """Print record counts and the bad-customer ratio for *df*.

    A "bad customer" is a row with flag_all == 1. Guards against an empty
    DataFrame so the ratio does not raise ZeroDivisionError (previously
    this crashed when len(df) == 0).

    Args:
        df: DataFrame with a 'flag_all' column.
        name: label used in the printed header.
    """
    bad_count = (df['flag_all'] == 1).sum()
    total_count = len(df)
    # Report 0.00% for an empty frame instead of dividing by zero.
    bad_ratio = bad_count / total_count * 100 if total_count else 0.0
    print(f"\n{name}统计:")
    print(f"总记录数: {total_count:,}")
    print(f"坏客户数: {bad_count:,}")
    print(f"坏客户比例: {bad_ratio:.2f}%")

def split_and_merge_data(final_df, merged_df):
    """Split final_df into train/validation by time, then enrich training.

    The split date is chosen so that roughly 70% of bad cases (flag_all == 1)
    fall in the training window; the modPro == 0 sample records from
    merged_df are appended to the training set only.

    Args:
        final_df: processed dataset with 'APP_DT' and 'flag_all' columns.
        merged_df: union of final_df and the sample population ('modPro').

    Returns:
        Tuple (final_train_df, val_df).
    """
    print("\n=== 开始数据拆分和合并 ===")
    print_stats(final_df, "原始final_df")
    
    # Order records chronologically by application date.
    final_df_sorted = final_df.sort_values('APP_DT')
    
    # Locate the index of the ~70th-percentile bad case.
    bad_cases = final_df_sorted[final_df_sorted['flag_all'] == 1]
    split_index = int(len(bad_cases) * 0.7)
    
    # The application date of that bad case becomes the split point.
    split_date = bad_cases.iloc[split_index]['APP_DT']
    print(f"\n分割时间点: {split_date}")
    
    # Everything on or before the split date trains; the rest validates.
    train_df = final_df_sorted[final_df_sorted['APP_DT'] <= split_date]
    val_df = final_df_sorted[final_df_sorted['APP_DT'] > split_date]
    
    # Post-split statistics.
    print("\n=== 时间分割后数据统计 ===")
    print_stats(train_df, "训练集")
    print_stats(val_df, "验证集")
    
    # Sample records (modPro == 0) are added to training only.
    sample_data = merged_df[merged_df['modPro'] == 0].copy()
    print_stats(sample_data, "样本数据(modPro=0)")
    
    # Final training set = time-based training slice + sample population.
    final_train_df = pd.concat([train_df, sample_data])
    print("\n=== 最终数据集统计 ===")
    print_stats(final_train_df, "合并后的训练集")
    print_stats(val_df, "最终验证集")
    
    # Persist both splits.
    final_train_df.to_csv('DNNFraud/dataOut/final_train_data.csv', index=False)
    val_df.to_csv('DNNFraud/dataOut/final_validation_data.csv', index=False)
    print("\n最终数据集已保存到 dataOut/final_train_data.csv 和 final_validation_data.csv")
    
    return final_train_df, val_df

def evaluate_score_var_rst(val_df):
    """Evaluate the vendor credibility score on the validation set.

    Inner-joins val_df with the score file on the application number, fills
    missing scores with the -999999 sentinel and computes KS/AUC.

    Args:
        val_df: validation DataFrame with 'app_num' and 'flag_all' columns.

    Returns:
        Tuple (intersection_df, ks, auc).
    """
    # Load the vendor score file.
    score_df = pd.read_csv('DNNFraud/dataIn/建信金科诚信度评分SCORE_VAR_RST.csv')
    
    # Inner-join validation applications with their scores.
    intersection_df = pd.merge(
        val_df[['app_num', 'flag_all']], 
        score_df[['APPLY_NO', 'SCORE_ANTI_FRAUD']], 
        left_on='app_num', 
        right_on='APPLY_NO', 
        how='inner'
    )
    
    # Report intersection size and bad-rate.
    print("\n=== 交集数据集统计 ===")
    print(f"交集记录数: {len(intersection_df):,}")
    print_stats(intersection_df, "交集数据")
    
    # NOTE(review): chained inplace fillna on a column of a freshly merged
    # frame works here, but under pandas copy-on-write this pattern may stop
    # propagating — confirm before upgrading pandas.
    intersection_df['SCORE_ANTI_FRAUD'].fillna(-999999, inplace=True)
    # Compute AUC and KS on the filled scores.
    ks, auc = calculate_ks_auc(intersection_df, 'SCORE_ANTI_FRAUD', 'flag_all')
    
    print("\n建信金科评分在验证集交集上的表现:")
    print(f"AUC值: {auc:.4f}")
    print(f"KS值: {ks:.4f}")
    
    return intersection_df, ks, auc

if __name__ == "__main__":
    # Build the final modelling dataset from all upstream sources.
    final_df = process_final_data()
    
    # Report missing anti-fraud scores in the final dataset.
    print("\nSCORE_ANTI_FRAUD缺失值统计:")
    print(final_df['SCORE_ANTI_FRAUD'].isna().sum(), "条记录存在缺失")
    
    
    # Evaluate the raw vendor score on the full dataset.
    ks, auc = calculate_ks_auc(final_df, 'SCORE_ANTI_FRAUD', 'flag_all')
    print("\n模型评估指标:")
    print(f"KS值: {ks:.4f}")
    print(f"AUC值: {auc:.4f}")
        
    # Union with the benchmark sample population.
    merged_df = union_with_sample(final_df)
    
    # Time-based train/validation split with sample enrichment.
    train_df, val_df = split_and_merge_data(final_df, merged_df)
    
    # Evaluate the vendor score on the validation intersection.
    intersection_df, ks, auc = evaluate_score_var_rst(val_df)

    



    


    

