import pandas as pd
import hashlib as hl
import os
import json
import numpy as np
import time

def stable_hash(row):
    """Derive a deterministic ID (bounded below 10**18) from a rule row.

    The hashed payload concatenates the fields that identify a rule
    branch, so identical field values always produce the same ID.
    """
    payload = (
        "\n    "
        f"{row['RULE_ID']}{row['AND_OR']}{row['ITEM_CODE']}"
        "\n    "
        f"{row['CONDITION_JSON']}{row['BRANCH_GRADE']}{row['BRANCH_NO']}"
        "\n    "
    )
    digest = hl.sha256(payload.encode('utf-8')).hexdigest()
    return int(digest, 16) % (10 ** 18)

def normalize(data):
    """Recursively canonicalize nested dicts and lists.

    Dicts are rebuilt with keys in sorted order and values normalized
    recursively; lists are normalized element-wise and sorted by each
    element's string form. Any other value passes through unchanged.
    """
    if isinstance(data, dict):
        return {key: normalize(data[key]) for key in sorted(data)}
    if isinstance(data, list):
        canonical = [normalize(entry) for entry in data]
        canonical.sort(key=str)
        return canonical
    return data

def load_csv(csv):
    """Read a rule CSV, canonicalize CONDITION_JSON, and attach a stable ID.

    CREATE_DATE/UPDATE_DATE are parsed with the dd/mm/yyyy HH:MM:SS format.
    The added 'sha256' column is the deterministic per-row hash used as the
    diff key between rule snapshots.
    """
    frame = pd.read_csv(
        csv,
        sep=',',
        parse_dates=['CREATE_DATE', 'UPDATE_DATE'],
        date_format='%d/%m/%Y %H:%M:%S',
    )
    frame['CONDITION_JSON'] = frame['CONDITION_JSON'].astype(object).apply(normalize)
    # Stable unique ID derived from the identifying rule fields.
    frame['sha256'] = frame.apply(stable_hash, axis=1)
    return frame

# Walk every CSV under the on-site rule directory (.\rule\xianchang) and
# diff it against the newly exported rule set.
def update_csv(new_rule, xc_path, up_path):
    """Diff `new_rule` against each CSV in `xc_path`, writing results to `up_path`.

    For every on-site CSV:
      * rows whose ID already exists in `new_rule` are removed from both sides;
      * rows whose content hash (sha256) has no counterpart on the other side
        are matched by (ITEM_CODE, RULE_ID);
      * when the new rows are more recent (max CREATE_DATE), the old row's ID
        is queued for deletion and a disabled STATUS ('0') is carried over;
      * otherwise the stale matching new rows are dropped from the output.

    Outputs per input file: a delete-SQL script (IDs batched 500 per
    statement) and a JSON file of the rules to insert.

    NOTE(review): `new_rule` is narrowed in place on every loop iteration,
    so later files are diffed against an ever-smaller new rule set —
    confirm this cumulative filtering is intentional.
    """
    # Current date, yyyymmdd — stamps the output file names.
    now = time.strftime('%Y%m%d')

    for file in os.listdir(xc_path):
        if not file.endswith('.csv'):
            continue
        print(f'开始对比{file}...')
        old_rule = load_csv(os.path.join(xc_path, file))

        # IDs present on both sides are treated as already synchronized:
        # drop them from both frames before hashing-based comparison.
        same_id_list = old_rule[old_rule['ID'].isin(new_rule['ID'])]['ID'].tolist()
        old_rule = old_rule[~old_rule['ID'].isin(same_id_list)]
        new_rule = new_rule[~new_rule['ID'].isin(same_id_list)]

        # Rows whose content hash has no counterpart on the other side.
        old_diff = old_rule[~old_rule['sha256'].isin(new_rule['sha256'])]
        new_diff = new_rule[~new_rule['sha256'].isin(old_rule['sha256'])]

        # Match each changed old row to its replacement by (ITEM_CODE, RULE_ID).
        del_id = []
        upd_diff = new_diff.copy()
        for _, row in old_diff.iterrows():
            matched_rows = new_diff[(new_diff['ITEM_CODE'] == row['ITEM_CODE']) & (new_diff['RULE_ID'] == row['RULE_ID'])]
            if matched_rows.empty:
                continue
            if pd.to_datetime(matched_rows['CREATE_DATE'].max()) > pd.to_datetime(row['CREATE_DATE']):
                # A newer replacement exists: delete the old row, and carry a
                # disabled STATUS over so re-import does not re-enable the rule.
                del_id.append(row['ID'])
                if row['STATUS'] == '0':
                    upd_diff.loc[upd_diff['ID'].isin(matched_rows['ID']), 'STATUS'] = '0'
            else:
                # Old row is as new or newer: the "new" rows are stale — drop them.
                upd_diff = upd_diff.drop(matched_rows.index)

        # Emit delete statements, batching 500 IDs per statement.
        # (Fixed: the batch loop variable used to shadow the `del_id` list.)
        if del_id:
            batches = [del_id[i:i + 500] for i in range(0, len(del_id), 500)]
            # newline='' stops text-mode translation from doubling the explicit
            # \r\n terminators into \r\r\n on Windows; output is unchanged on POSIX.
            with open(f'{up_path}{file.replace(".csv", "")}_需覆盖删除数据_{now}.sql', 'w', newline='') as f:
                for batch in batches:
                    sql = f"delete from MIA_RULE_ITEM_V1 where ID in ({','.join([f'{id}' for id in batch])});\r\n"
                    f.write(sql)

        # Strip control characters from CONDITION_JSON before serializing.
        upd_diff.loc[:, 'CONDITION_JSON'] = upd_diff['CONDITION_JSON'].str.replace(r'[\x00-\x1f\x7f]', '', regex=True)
        # Export format requires e.g. "CREATE_DATE": "30/12/2023 12:12:12".
        upd_diff['CREATE_DATE'] = upd_diff['CREATE_DATE'].dt.strftime('%d/%m/%Y %H:%M:%S')
        upd_diff['UPDATE_DATE'] = upd_diff['UPDATE_DATE'].dt.strftime('%d/%m/%Y %H:%M:%S')
        output_file = f'{up_path}{file.replace(".csv", "")}_本次新增规则_{now}.json'
        with open(output_file, 'w', encoding='utf-8') as f:
            # (Fixed: removed a dead duplicate of the control-character strip
            # that ran here *after* the JSON had already been written.)
            json.dump(upd_diff.replace({np.nan: None}).to_dict('records'), f, ensure_ascii=False, indent=4)
            

# Script entry point: load the freshly exported rule set and diff it
# against every on-site rule CSV, writing update artifacts.
if __name__ == '__main__':
    print('开始差量对比...')
    # Project root is three directory levels above this script.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    root_path = os.path.dirname(os.path.dirname(script_dir))
    rule_dir = f'{root_path}/rule'
    # Load the new rule export, then compare against each on-site CSV.
    latest_rules = load_csv(f'{rule_dir}/MIA_RULE_ITEM_V1.csv')
    update_csv(latest_rules, f'{rule_dir}/xianchang/', f'{rule_dir}/update/')
    print('差量对比完成！已生成更新文件。')