"""
飞检数据STEP8筛选 - clue_bad_tx.py
============================================================================

将 手术表与最大单价治疗关联，找出高编手术编码的线索。

主要功能：
1. 读入 clue861_library.xlsx 文件，获取 手术模式 txPtn 与 项目模式列表 itemPtnList 对应字典
2. 从 scene_item_ext 表中，加载对应医院的项目数据，筛选出符合各个 txPtn 的 治疗单价阈值p_limit
3. 关联 dip_case 与 tx 表，获得每个 setl_id 的所有手术编码，并且获得 setl_id 的 最大治疗项目单价 p
4. 在数据库中，关联 dip_case 与 tx 表 与 clue861dict1 表，筛选出违规线索
    4.1. 加载 env.xlsx 文件中的 dip_library 表，制作 dip_code 对应的 dip_score 字典
    4.2. 加载 env.xlsx 文件中的 tx_type 表，制作一类手术集合、二类手术集合、三类手术集合
    4.3. 计算 dip_amount 与 dip_amount_without_tx
5. 筛选出 tx 表中存在某个 手术编码tx_code 且同一个 setl_id 下 p 小于对应的 p_limit 的 setl_id，作为线索输出
    5.1. 输出 setl_id 以及对应的 tx_code, tx_name, tx_hsp_name, top_oper_code, top_oper_name, p, p_limit 字段
    5.2. 大概计算当前 dip 组可以获得的金额，以及如果去掉手术编码后的金额差值
    5.3. 作为 clue861_手术高编线索_内部_{hsp_code}.xlsx 文件输出
6. 统计违规 setl_id 下，触发违规的 手术编码tx_code 与各个治疗费、手术费的出现数量，作为进一步分析的依据
    6.1. 输出 tx_code, tx_name, item_j_code, item_code, item_name, item_hsp_code, item_hsp_name, case_count
    6.2. 作为 stat_clue861_{hsp_code}.xlsx 的 stats 工作表输出
7. 作为 CLUE861_手术高编线索_内部_{hsp_code}.xlsx，即一个新的表格输出，供内部医学人员观看

============================================================================
"""
# Constants: default settlement price per DIP score point and global adjustment factor.
# (Used by _cal_single_dip_amount / _cal_single_dip_an_amount via main.)
PRICE310 = 14.66  # price per score point when insutype is 310 or contains 职工 (employee scheme)
PRICE390 = 10.76  # price per score point for all other insutypes (e.g. 390 resident scheme)
FACTOR = 0.9  # scaling factor applied to both prices before settlement computation


import os
import re
import sys
import time
import pandas as pd
from sqlalchemy import text
import json

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from config import create_db_engine

# ASCII control characters (except \t, \n, \r) that Excel/openpyxl reject.
_ILLEGAL_CHAR_RE = re.compile(r"[\x00-\x08\x0b\x0c\x0e-\x1f]")

def clean_illegal_chars(s):
    """Strip Excel-illegal ASCII control characters from a string.

    Args:
        s: the value to clean; NaN/None are passed through unchanged
           (hence no strict ``str`` annotation — the original's
           ``s: str -> str`` hints were inaccurate for the NaN path).

    Returns:
        ``s`` with characters in ``_ILLEGAL_CHAR_RE`` removed, or ``s``
        itself when ``pd.isna(s)`` is true.
    """
    if pd.isna(s):
        return s
    return _ILLEGAL_CHAR_RE.sub("", s)


# Timing helper: wall-clock stamp plus elapsed time since module load.
t0 = time.time()

def elapsed() -> str:
    """Return 'HH:MM:SS (+ <elapsed>)' where <elapsed> scales from seconds up to days."""
    stamp = time.strftime("%H:%M:%S", time.localtime())
    total = int(time.time() - t0)
    days, rest = divmod(total, 86400)
    hours, rest = divmod(rest, 3600)
    minutes, seconds = divmod(rest, 60)
    if total >= 86400:
        detail = f"{days} day {hours} hour {minutes} min {seconds} sec"
    elif total >= 3600:
        detail = f"{hours} hour {minutes} min {seconds} sec"
    elif total >= 60:
        detail = f"{minutes} min {seconds} sec"
    else:
        detail = f"{total} sec"
    return f"{stamp} (+ {detail})"
    
# Shared SQLAlchemy engine used by every query in this script,
# created once at import time via the project-level factory.
engine = create_db_engine()

# Read clue861_library.xlsx
def fetch_clue861_library():
    """Read clue861_library.xlsx and group item patterns by surgery pattern.

    Returns:
        dict mapping txPtn -> {'txPtnName': name from the first row seen,
        'itemPtnList': [itemPtn, ...] in file order}.
    """
    dfLib = pd.read_excel('clue861_library.xlsx')
    grouped = {}
    for _, rec in dfLib.iterrows():
        # First occurrence of a txPtn fixes its display name; later rows only add patterns.
        entry = grouped.setdefault(rec['txPtn'], {'txPtnName': rec['txPtnName'], 'itemPtnList': []})
        entry['itemPtnList'].append(rec['itemPtn'])
    return grouped

# Load the hospital's item data and derive the treatment unit-price threshold p_limit per txPtn
def match_p_limit(hsp_abbr: str, tx_itemPtn_dict: dict) -> dict:
    """For one hospital, derive the treatment unit-price threshold p_limit per txPtn.

    Loads the hospital's rows from scene_item_ext, then for each txPtn keeps the
    cheapest item (column ``p``) whose item_code or item_name exactly equals any
    itemPtn in its itemPtnList.

    Args:
        hsp_abbr: hospital abbreviation, matched against scene_item_ext.hsp_abbr.
        tx_itemPtn_dict: output of fetch_clue861_library().

    Returns:
        dict mapping txPtn -> {'txPtnName', 'p_limit', 'item_j_code_limit',
        'item_code_limit', 'item_name_limit'}; txPtns with no matching item
        are omitted.

    Fixes vs. original: the return annotation said ``pd.DataFrame`` although a
    dict was returned; the ``999999`` sentinel (which silently ignored items
    priced >= 999999) is replaced by a None sentinel; the minimum price is
    computed once per match instead of repeatedly.
    """
    with engine.connect() as conn:
        dfSie = pd.read_sql(
            text("SELECT item_j_code, item_code, item_name, p FROM scene_item_ext WHERE hsp_abbr = :hsp_abbr"),
            conn,
            params={"hsp_abbr": hsp_abbr}
        )
    tx_pLimit_dict = {}
    for txPtn, v in tx_itemPtn_dict.items():
        best = None  # cheapest matching row (pd.Series) seen so far for this txPtn
        for itemPtn in v['itemPtnList']:
            dfMatched = dfSie[(dfSie['item_code'] == itemPtn) | (dfSie['item_name'] == itemPtn)]
            if dfMatched.empty:
                continue
            # idxmin returns the first row holding the minimum price — same
            # tie-break as the original `.values[0]` lookup.
            cand = dfMatched.loc[dfMatched['p'].idxmin()]
            if best is None or cand['p'] < best['p']:
                best = cand
        if best is not None:
            tx_pLimit_dict[txPtn] = {
                'txPtnName': v['txPtnName'],
                'p_limit': best['p'],
                'item_j_code_limit': best['item_j_code'],
                'item_code_limit': best['item_code'],
                'item_name_limit': best['item_name'],
            }
    return tx_pLimit_dict

# Expand each txPtn into concrete tx_codes and upload them to the database
def expand_txPtn(tx_pLimit_dict: dict, hsp_abbr: str):
    """Expand each txPtn into concrete surgery codes and upload them to clue861dict1.

    Rebuilds the helper table clue861dict1 (drop if present, then create),
    inserts one row per scene_icd surgery code whose 'tx' + first-5-chars
    prefix matches a txPtn that has a price threshold, and indexes the table.

    Args:
        tx_pLimit_dict: output of match_p_limit().
        hsp_abbr: hospital abbreviation stamped onto every uploaded row.

    Fix vs. original: ``to_sql`` is now skipped when no surgery code matched —
    appending a column-less empty DataFrame can fail, and there is nothing to
    upload anyway.
    """
    with engine.connect() as conn:
        # Drop the previous helper table if it exists (ORA-00942 = table absent).
        conn.execute(text("""
            BEGIN
                EXECUTE IMMEDIATE 'DROP TABLE clue861dict1';
            EXCEPTION
                WHEN OTHERS THEN
                    IF SQLCODE != -942 THEN
                        RAISE;
                    END IF;
            END;
        """))
        conn.commit()
        # Recreate the helper table (Oracle DDL commits implicitly).
        conn.execute(text("""
            CREATE TABLE clue861dict1 (
                hsp_abbr VARCHAR2(100),
                tx_code VARCHAR2(100),
                tx_name VARCHAR2(500),
                tx_hsp_name VARCHAR2(500),
                p_limit NUMBER,
                item_j_code_limit NUMBER,
                item_code_limit VARCHAR2(100),
                item_name_limit VARCHAR2(500)
            )
        """))

    # Every known surgery code with its standard and hospital-local names.
    with engine.connect() as conn:
        dfSceneIcd = pd.read_sql("SELECT icd_code, icd_name, icd_hsp_name FROM scene_icd WHERE icd_type='tx' AND icd_code IS NOT NULL", conn)

    data = []
    for rec in dfSceneIcd.to_dict(orient='records'):
        tx_code = rec['icd_code']
        # Pattern key is 'tx' + first 5 chars of the code (e.g. '86.10' -> 'tx86.10').
        # Assumes codes are at least 5 chars — shorter codes just yield a shorter key.
        limit = tx_pLimit_dict.get('tx' + tx_code[:5])
        if limit is None:
            continue
        data.append({
            'hsp_abbr': hsp_abbr,
            'tx_code': tx_code,
            'tx_name': rec['icd_name'],
            'tx_hsp_name': rec['icd_hsp_name'],
            'p_limit': limit['p_limit'],
            'item_j_code_limit': limit['item_j_code_limit'],
            'item_code_limit': limit['item_code_limit'],
            'item_name_limit': limit['item_name_limit']
        })

    # Upload only when there is something to upload (see docstring).
    # NOTE(review): with SQLAlchemy 2.x a Connection-based to_sql may need an
    # explicit commit depending on pandas version — confirm rows land in prod.
    if data:
        dfClue861dict1 = pd.DataFrame(data)
        with engine.connect() as conn:
            dfClue861dict1.to_sql('clue861dict1', conn, if_exists='append', index=False)

    # Index the lookup columns used by the clue-filtering joins.
    with engine.connect() as conn:
        conn.execute(text("CREATE INDEX idx_clue861dict1_tx_code ON clue861dict1 (tx_code)"))
        conn.execute(text("CREATE INDEX idx_clue861dict1_item_j_code_limit ON clue861dict1 (item_j_code_limit)"))
        conn.execute(text("CREATE INDEX idx_clue861dict1_p_limit ON clue861dict1 (p_limit)"))
        conn.execute(text("CREATE INDEX idx_clue861dict1_hsp_abbr ON clue861dict1 (hsp_abbr)"))
        conn.commit()

# Join dip_case, tx and clue861dict1 in the database and pull candidate clue rows
def filte_clue_data(hsp_code: str) -> pd.DataFrame:
    """Return candidate clue rows: cases billing a surgery code whose price
    threshold p_limit exceeds the case's max treatment unit price p.

    NOTE(review): the parameter is compared against clue861dict1.hsp_abbr,
    so callers actually pass the hospital abbreviation.
    """
    clue_sql = text("""
        SELECT
            dc.setl_id,
            dc.dip_code,
            dc.dip_name,
            dc.dip_score,
            m.insutype,
            m.gnr_c,
            t.tx_code,
            t.tx_name,
            t.tx_hsp_name,
            dc.top_oper_code,
            dc.top_oper_name,
            dc.p,
            c.p_limit,
            c.item_j_code_limit,
            c.item_code_limit,
            c.item_name_limit
        FROM
            dip_case dc
        JOIN
            tx t ON dc.setl_id = t.setl_id
        JOIN
            clue861dict1 c ON t.tx_code = c.tx_code
        JOIN
            m ON dc.setl_id = m.setl_id
        WHERE
            c.hsp_abbr = :hsp_code
            AND dc.p < c.p_limit
    """)
    with engine.connect() as conn:
        return pd.read_sql(clue_sql, conn, params={"hsp_code": hsp_code})

# Load the dip_library sheet and build the dip_code -> dip_score mapping
def fetch_dip_library() -> dict:
    """Read sheet 'dip_library' of env.xlsx and map dip_code -> dip_score.

    Duplicate dip_codes keep the last occurrence, matching the previous
    set_index(...).to_dict() behavior.
    """
    sheet = pd.read_excel('env.xlsx', sheet_name='dip_library')
    return dict(zip(sheet['dip_code'], sheet['dip_score']))

# Load the tx_type sheet and build the class-1 / class-2 / class-3 surgery code sets
def fetch_tx_type() -> tuple:
    """Load sheet 'tx_type' of env.xlsx and build the three surgery-class code sets.

    Returns:
        (txType1_set, txType2_set, txType3_set): sets of tx_code values whose
        tx_class column is 一类 / 二类 / 三类 respectively.

    Fix vs. original: the return annotation said ``dict`` although the function
    has always returned a 3-tuple of sets.
    """
    dfTxType = pd.read_excel('env.xlsx', sheet_name='tx_type')

    def _codes(tx_class: str) -> set:
        # All surgery codes belonging to one class.
        return set(dfTxType.loc[dfTxType['tx_class'] == tx_class, 'tx_code'])

    return _codes('一类'), _codes('二类'), _codes('三类')

# 已知单个病例计算 dip_amount 的函数
def _cal_single_dip_amount(row: pd.Series, price310: float, price390: float, factor: float) -> float:
    # 假设传入的是 dfClue 的一行，需要知道的是 dip_score, insutype, gnr_c
    dip_score = row['dip_score']
    insutype = row['insutype']
    gnr_c = row['gnr_c']
    # 根据 insutype 计算 price
    if insutype in ('310', 310) or '职工' in insutype:
        price = price310 * factor
    else:
        price = price390 * factor
    # 计算病种均费 avg_c = dip_score
    avg_c = dip_score * price
    # 根据 gnr_c 与 avg_c 的关系，计算 dip_amount
    if gnr_c < avg_c * 0.5:
        dip_amount = gnr_c
    elif gnr_c > avg_c * 2:
        dip_amount = ( gnr_c / avg_c - 1 ) * dip_score * price
    else:
        dip_amount = dip_score * price
    return dip_amount

# 已知单个病例，推断失去所有手术之后会进入哪一个 dip_group 组
def _cal_single_dip_an(
        row: pd.Series, dip_score_dict: dict, 
        txType1_set: set, txType2_set: set, txType3_set: set
) -> str:
    # 假设传入的是 dfClue 的一行，需要知道的是 dip_code, tx_code
    dip_code = row['dip_code']
    tx_code = row['tx_code']
    dx_ptn, tx_ptn = dip_code.split('|')
    # 如果 tx_ptn 是 'a(n)'，那么就是当前手术组
    if tx_ptn == 'a(n)':
        return dip_code
    
    # 判断相关性：如果 tx_ptn 在解开之后，其中某一个元素 在 tx_code 中出现，那么就是相关手术
    # 解开 tx_ptn
    if '/' in tx_ptn:
        tx_ptn_list = tx_ptn.split('/')
    elif '+' in tx_ptn:
        tx_ptn_list = tx_ptn.split('+')
    else:
        tx_ptn_list = [tx_ptn]
    # 判断相关性
    is_related = False
    if tx_ptn == '一类' and tx_code in txType1_set:
        is_related = True
    elif tx_ptn == '二类' and tx_code in txType2_set:
        is_related = True
    elif tx_ptn == '三类' and tx_code in txType3_set:
        is_related = True
    else:
        for tptn in tx_ptn_list:
            if tptn in tx_code:
                is_related = True
                break
    # 如果不相关，输出原 dip_code
    if not is_related:
        return dip_code

    # 从 dip_score_dict 表中，获取所有 dip_code 列表
    dip_codes = list(dip_score_dict.keys())
    # 遍历 dip_codes，找到 dx_ptn 符合，且 tx_ptn 为 'a(n)' 的 dip_code
    for dc in dip_codes:
        dx, tx = dc.split('|')
        if dx == dx_ptn and tx == 'a(n)':
            return dc
    # 如果没有找到，返回原 dx_ptn的第一个字母大写 + '|a(n)'
    return dx_ptn[0].upper() + '|a(n)'

# 已知单个病例，推断失去当前手术之后进入的 dip_group 组，计算 dip_amount_without_tx
def _cal_single_dip_an_amount(row: pd.Series, price310: float, price390: float, factor: float) -> float:
    # 假设传入的是 dfClue 的一行，需要知道的是 dip_score, insutype, gnr_c
    dip_score = row['dip_score_an']
    insutype = row['insutype']
    gnr_c = row['gnr_c']

    # 根据 insutype 计算 price
    if insutype in ('310', 310) or '职工' in insutype:
        price = price310 * factor
    else:
        price = price390 * factor
    # 计算病种均费 avg_c = dip_score
    avg_c = dip_score * price
    # 根据 gnr_c 与 avg_c 的关系，计算 dip_amount_without_tx
    if gnr_c < avg_c * 0.5:
        dip_amount_an = gnr_c
    elif gnr_c > avg_c * 2:
        dip_amount_an = ( gnr_c / avg_c - 1 ) * dip_score * price
    else:
        dip_amount_an = dip_score * price
    return dip_amount_an

# Estimate the current dip-group settlement and the delta after removing the surgery code
def calculate_dip_amounts(
        dfClue: pd.DataFrame, dip_score_dict: dict,
        price310: float, price390: float, factor: float,
        txType1_set: set, txType2_set: set, txType3_set: set
) -> pd.DataFrame:
    """Add dip_amount, dip_code_an, dip_score_an, dip_amount_an and
    dip_amount_diff columns to dfClue (mutated in place and returned)."""
    # Settlement under the currently coded DIP group.
    dfClue['dip_amount'] = dfClue.apply(
        _cal_single_dip_amount, axis=1, args=(price310, price390, factor)
    )
    # The group the case would fall into without the flagged surgery.
    dfClue['dip_code_an'] = dfClue.apply(
        _cal_single_dip_an, axis=1,
        args=(dip_score_dict, txType1_set, txType2_set, txType3_set)
    )
    # Score of that corrected group (NaN when absent from the library).
    dfClue['dip_score_an'] = dfClue['dip_code_an'].map(dip_score_dict)
    # Settlement under the corrected group.
    dfClue['dip_amount_an'] = dfClue.apply(
        _cal_single_dip_an_amount, axis=1, args=(price310, price390, factor)
    )
    # Positive diff = money gained by the (suspect) surgery coding.
    dfClue['dip_amount_diff'] = dfClue['dip_amount'] - dfClue['dip_amount_an']
    return dfClue

# Count, per offending tx_code, how often each treatment/surgery fee item appears
def statistic_clue_data(hsp_abbr: str) -> pd.DataFrame:
    """For each surgery code that triggered a clue, count the treatment/surgery
    fee items billed by the violating settlements (basis for further analysis)."""
    stats_sql = text("""
        WITH T0 AS (
            -- 找出违规的 setl_id 和 tx_code
            SELECT
                dc.setl_id,
                t.tx_code,
                t.tx_name,
                dc.p,
                c.item_code_limit,
                c.item_name_limit
            FROM dip_case dc
                JOIN tx t ON dc.setl_id = t.setl_id
                JOIN clue861dict1 c ON t.tx_code = c.tx_code
                JOIN m ON dc.setl_id = m.setl_id
            WHERE
                c.hsp_abbr = :hsp_code
                AND dc.p < c.p_limit
        )
        ,T1 AS (
            SELECT t0.tx_code, count(distinct t0.setl_id) AS tx_case_count
            FROM t0
            GROUP BY t0.tx_code
        )
        -- 关联 d 表，统计治疗费与手术费使用数量
        SELECT
            t0.tx_code,
            MAX(t0.item_code_limit) AS item_code_limit,
            MAX(t0.item_name_limit) AS item_name_limit,
            MAX(t1.tx_case_count) AS tx_case_count,
            t0.tx_name,
            d.item_j_code,
            MAX(d.item_code) AS item_code,
            MAX(d.item_name) AS item_name,
            MAX(d.item_hsp_code) AS item_hsp_code,
            MAX(d.item_hsp_name) AS item_hsp_name,
            MAX(d.p) as max_p,
            MIN(d.p) as min_p,
            COUNT(DISTINCT t0.setl_id) AS case_count,
            COUNT(DISTINCT t0.setl_id)/MAX(t1.tx_case_count) AS usage
        FROM t0, t1, d
        WHERE t0.setl_id = d.setl_id AND d.item_type in ('治疗费','手术费')
            AND t0.tx_code = t1.tx_code
        GROUP BY t0.tx_code, t0.tx_name, d.item_j_code
        ORDER BY t0.tx_code
    """)
    with engine.connect() as conn:
        return pd.read_sql(stats_sql, conn, params={"hsp_code": hsp_abbr})

# Build the detailed clue report
def output_clue_data(dfClue: pd.DataFrame, hsp_abbr: str):
    """Assemble the detailed clue report: settlement, diagnosis and surgery info
    joined onto the per-case clue rows.

    Args:
        dfClue: output of calculate_dip_amounts (at most one row per setl_id).
        hsp_abbr: hospital abbreviation — currently unused, kept for interface
            stability with the caller.

    Returns:
        DataFrame ready to be written as the detail sheet.

    Fix vs. original: the rename mapping listed the key 'dip_score_an' twice;
    the duplicate (a silent no-op in a dict literal) is removed.
    """
    # Chunk setl_ids into IN (...) lists of <= 900 elements (Oracle caps IN
    # lists at 1000), OR-ed together into one predicate.
    # NOTE(review): ids are interpolated into the SQL text; they come from our
    # own database, but bind parameters would be safer against odd id values.
    setl_id_list = dfClue['setl_id'].unique().tolist()
    batch_size = 900
    in_clauses = []
    for i in range(0, len(setl_id_list), batch_size):
        batch = setl_id_list[i:i + batch_size]
        in_clauses.append("vs.结算ID IN ({})".format(','.join(f"'{sid}'" for sid in batch)))
    setl_id_sql = " OR ".join(in_clauses)

    # Settlement + admission header info.
    sql_setl = text(f"""
        SELECT
            vs.结算ID,
            vs.人员编号, vs.人员姓名, vs.人员证件类型, vs.证件号码, vs.性别, vs.出生日期, vs.年龄,
            vs.险种类型, vs.人员类别, vs.定点医药机构编号, vs.定点医药机构名称, vs.开始日期, vs.结束日期, vs.结算时间,
            vs.医疗类别, vmd."住院/门诊号", vmd.入院科室名称, vmd.出院科室名称, vmd.离院方式, vs.医疗费总额
        FROM v_setl_map vs, v_mdtrt_map vmd
        WHERE vs.就诊ID = vmd.就诊ID
            AND ({setl_id_sql})
    """)
    with engine.connect() as conn:
        dfSetl = pd.read_sql(sql_setl, conn)

    # Diagnoses, aggregated to one row per settlement.
    sql_dx = text(f"""
        SELECT
            dx.setl_id as 结算ID,
            listagg(distinct case when dx.is_main=1 then dx.dx_code else null end,',') within group(order by dx.dx_code) as 主诊断编码,
            listagg(distinct case when dx.is_main=1 then dx.dx_hsp_name else null end,',') within group(order by dx.dx_code) as 主诊断名称,
            listagg(distinct dx.dx_code,',') within group(order by dx.dx_code) as 全诊断编码,
            listagg(distinct dx.dx_hsp_name,',') within group(order by dx.dx_hsp_name) as 全诊断名称
        FROM dx
        WHERE 1=1
            AND ({setl_id_sql.replace('vs.结算ID', 'dx.setl_id')})
        GROUP BY dx.setl_id
    """)
    with engine.connect() as conn:
        dfDx = pd.read_sql(sql_dx, conn)

    # Surgeries, aggregated to one row per settlement.
    sql_tx = text(f"""
        SELECT
            tx.setl_id as 结算ID,
            listagg(distinct case when tx.is_main=1 then tx.tx_code else null end,',') within group(order by tx.tx_code) as 主手术编码,
            listagg(distinct case when tx.is_main=1 then tx.tx_hsp_name else null end,',') within group(order by tx.tx_code) as 主手术名称,
            listagg(distinct tx.tx_code,',') within group(order by tx.tx_code) as 全手术编码,
            listagg(distinct tx.tx_hsp_name,',') within group(order by tx.tx_hsp_name) as 全手术名称
        FROM tx
        WHERE 1=1
            AND ({setl_id_sql.replace('vs.结算ID', 'tx.setl_id')})
        GROUP BY tx.setl_id
    """)
    with engine.connect() as conn:
        dfTx = pd.read_sql(sql_tx, conn)

    # Select and relabel the clue columns for the report.
    dfClueFormat = dfClue.copy()
    dfClueFormat = dfClueFormat[[
        'setl_id', 'dip_code', 'dip_name', 'dip_score', 'tx_code', 'tx_name',
        'dip_amount', 'dip_code_an', 'dip_score_an', 'dip_amount_an', 'dip_amount_diff'
    ]]
    dfClueFormat = dfClueFormat.rename(columns={
        'setl_id': '结算ID',
        'tx_code': '高编手术编码', 'tx_name': '高编手术名称',
        'dip_code': '当前DIP编码', 'dip_name': '当前DIP名称', 'dip_score': '当前DIP分值', 'dip_amount': '当前DIP结算额',
        'dip_code_an': '校正后DIP编码', 'dip_score_an': '校正后DIP分值', 'dip_amount_an': '校正后DIP结算额',
        'dip_amount_diff': '结算额差值'
    })

    # Join everything onto the clue rows (settlement info is required; dx/tx
    # aggregates are optional, hence the left joins).
    dfClueData = dfClueFormat.merge(dfSetl, on='结算ID', how='inner')
    dfClueData = dfClueData.merge(dfDx, left_on='结算ID', right_on='结算ID', how='left', suffixes=('', '_dx'))
    dfClueData = dfClueData.merge(dfTx, left_on='结算ID', right_on='结算ID', how='left', suffixes=('', '_tx'))
    return dfClueData

# Drop intermediate tables
def drop_intermediate_tables():
    """Drop the intermediate table clue861dict1, ignoring ORA-00942 if absent.

    Fix vs. original: the PL/SQL block ran a static ``DELETE FROM
    clue861dict1`` — that only cleared rows (the table survived, contradicting
    this docstring and the caller's "删除完成" log), and if the table did not
    exist the block failed at *compile* time, so the ORA-00942 handler never
    ran. Using EXECUTE IMMEDIATE 'DROP TABLE ...' mirrors the guarded drop in
    expand_txPtn.
    """
    with engine.connect() as conn:
        conn.execute(text("""
            BEGIN
                EXECUTE IMMEDIATE 'DROP TABLE clue861dict1';
            EXCEPTION
                WHEN OTHERS THEN
                    IF SQLCODE != -942 THEN
                        RAISE;
                    END IF;
            END;
        """))
        conn.commit()

# Entry point: run the whole clue pipeline for every hospital in hspList.json
if __name__ == "__main__":
    # Pricing constants (see module-level PRICE310 / PRICE390 / FACTOR).
    price310 = PRICE310
    price390 = PRICE390
    factor = FACTOR
    # Optional per-hospital factor override; empty means every hospital uses `factor`.
    hspFactor_dict = {}

    # STEP1: read clue861_library.xlsx -> {txPtn: {'txPtnName', 'itemPtnList'}}
    tx_itemPtn_dict = fetch_clue861_library()
    print(f"{elapsed()} STEP1：读取 clue861_library.xlsx 文件，获取 手术模式 txPtn 与 项目模式列表 itemPtnList 对应字典 完成。")

    # Hospitals to process.
    with open('hspList.json', 'r', encoding='utf-8') as f:
        hspList = json.load(f)

    # env.xlsx reference data is hospital-independent: load it lazily, once.
    # (Fix: the original reloaded env.xlsx for every hospital that had clues.)
    dip_score_dict = None
    tx_type_sets = None

    for hspDatum in hspList:
        hspAbbr = hspDatum['hsp_abbr']

        # STEP2: derive per-txPtn price thresholds from this hospital's items.
        tx_pLimit_dict = match_p_limit(hspAbbr, tx_itemPtn_dict)
        print(f"{elapsed()} STEP2：加载医院 {hspAbbr} 项目数据，获得符合各个 txPtn 的治疗单价阈值 p_limit 完成。")

        # STEP3: expand txPtns to concrete tx_codes and upload to clue861dict1.
        expand_txPtn(tx_pLimit_dict, hspAbbr)
        print(f"{elapsed()} STEP3：医院 {hspAbbr} 的 txPtn 扩充为 tx_code 列表，并且上传到数据库 完成。")

        # STEP4: join dip_case / tx / clue861dict1 to pull candidate clues.
        dfClue = filte_clue_data(hspAbbr)
        print(f"{elapsed()} STEP4：医院 {hspAbbr} 的违规线索筛选完成，共 {len(dfClue)} 条。")

        if dfClue.empty:
            print(f"{elapsed()} 医院 {hspAbbr} 无违规线索，跳过后续步骤。")
            continue

        # STEP5: compute settlement amounts with and without the flagged surgery.
        if dip_score_dict is None:
            dip_score_dict = fetch_dip_library()
            tx_type_sets = fetch_tx_type()
        txType1_set, txType2_set, txType3_set = tx_type_sets
        hspFactor = hspFactor_dict.get(hspAbbr, factor)
        dfClue = calculate_dip_amounts(
            dfClue, dip_score_dict,
            price310=price310, price390=price390, factor=hspFactor,
            txType1_set=txType1_set, txType2_set=txType2_set, txType3_set=txType3_set
        )
        # Keep only rows with a positive settlement difference, then one row
        # per setl_id (the one with the largest difference).
        dfClue = dfClue[dfClue['dip_amount_diff'] > 0]
        dfClue = dfClue.sort_values(by=['setl_id', 'dip_amount_diff'], ascending=[True, False])
        dfClue = dfClue.drop_duplicates(subset=['setl_id'], keep='first')
        print(f"{elapsed()} STEP5：医院 {hspAbbr} 的 dip_amount 与 dip_amount_without_tx 计算完成，共 {len(dfClue)} 条。")

        # STEP6: write the per-hospital clue workbook (main list + stats sheet).
        os.makedirs(r'STEP8筛选/clue', exist_ok=True)
        output_filename = fr"STEP8筛选/clue/stat_clue861_{hspAbbr}.xlsx"
        with pd.ExcelWriter(output_filename, engine='openpyxl') as writer:
            dfClue.to_excel(writer, sheet_name='线索主单列表', index=False)
            dfStats = statistic_clue_data(hspAbbr)
            dfStats.to_excel(writer, sheet_name='线索统计', index=False)
        print(f"{elapsed()} STEP6：医院 {hspAbbr} 的线索输出到文件 {output_filename} 完成。")

        # STEP7: write the detailed clue report for internal reviewers.
        dfClueData = output_clue_data(dfClue, hspAbbr)
        os.makedirs(fr"STEP8筛选/clue/{hspAbbr}", exist_ok=True)
        output_data_filename = fr"STEP8筛选/clue/{hspAbbr}/clue861_手术高编线索_内部_{hspAbbr}.xlsx"
        with pd.ExcelWriter(output_data_filename, engine='openpyxl') as writer:
            dfClueData.to_excel(writer, sheet_name='违规线索大明细', index=False)
        print(f"{elapsed()} STEP7：医院 {hspAbbr} 的违规线索大明细输出到文件 {output_data_filename} 完成。")

        print(f"{elapsed()} 医院 {hspAbbr} 全部处理完成。\n")

    drop_intermediate_tables()
    print(f"{elapsed()} 中间表 clue861dict1 删除完成。")
    print(f"{elapsed()} 全部处理完成。")