"""
飞检数据STEP8筛选 - clue_missing_tx.py
============================================================================

将 手术表与最大单价治疗、呼吸机等特别项目关联，找出缺失手术编码或者主诊断编码不对应的线索。
注！！！！这个线索的逻辑由地区分组方式决定。现在仅限于江门地区。

主要功能：
1. 读入 clue863_library.xlsx 文件，有四列：ITEM_CODE, ITEM_NAME, TX_CODE, TX_NAME，构建项目编码与ICD9手术编码的映射关系。
2. 从 dip_case 表中获得 tx 表中同一个 setl_id 下没有任何手术的病例的，但操作最大单价>500元或有呼吸机的病例。
    2.1. 先筛选出 dip_case 中 tx_pattern = 'a(n)' 的病例，作为 list[dict] 结构保存
    2.2. 查出这些病例的手术信息，作为 dict 结构保存，key 为 setl_id，tx_codes 为手术编码列表
    2.3. 查出这些病例的诊断信息，作为 dict 结构保存，key 为 setl_id, dx_codes 为诊断编码列表
    2.4. 查出这些病例当中有使用无创呼吸机的 setl_id 集合
    2.5. 查出这些病例当中有使用有创呼吸机的 setl_id 与以及使用时间，作为 dict 结构保存，key 为 setl_id, sum_q 为使用时间（小时）
3. 对上述数据进行整合，整合到 list[dict] 结构中。dx_codes 是浮动诊断池，tx_codes 是固定手术池
    3.1. 有创呼吸机 >= 96 小时，则 96.7201 加入固定手术池
    3.2. 有创呼吸机 < 96 小时，则 96.7101 加入固定手术池
    3.3. 没有有创呼吸机，但有无创呼吸机，则 93.9000 加入固定手术池
    3.4. 查找最大单价治疗项目，匹配 clue863_library.xlsx，找到对应的 ICD9 手术编码，作为浮动手术池
4. 读入分组库
    4.1. 读入 env.xlsx 的 dip_library 表
    4.2. 读入 env.xlsx 的 tx_type 表，根据 tx_class 列为一类、二类、三类，分别保存 tx_type1_set, tx_type2_set, tx_type3_set 三个集合
5. 对整合的数据进行分组
    5.1. 每一个浮动诊断池*【每一个浮动手术池+固定手术池】作为一个分组情况，每个情况模拟一个虚拟的 setl_id，上传到数据库 clue863tmp_m, clue863tmp_dx, clue863tmp_tx 表
    5.2. 根据 dip_library 表进行分组，采用 step5_7_create_dip 的分组逻辑，全分组结果保存到 clue863tmp_dip 表，最佳分组结果保存到 clue863tmp_dip_case 表
6. 对分组结果进行分析，找出如果补充了缺失手术编码、或者更换了一个主诊断编码，带来的分组变化与收益增加
    6.1. 对 dip_library 中的分组分值库进行计算，计算分组前与分组后的结算金额，以及其差值
    6.2. 仅保留差值 > 0 的 且 当前病例总费用不超过分组均值的 2 倍、或者小于分组均值 0.5 倍的结果

============================================================================
"""



from __future__ import annotations
import os
import sys
from pathlib import Path
from typing import Callable
import argparse
import pandas as pd
from sqlalchemy import text
from sqlalchemy.engine import Engine
import time
import json
import openpyxl
from openpyxl.styles import Alignment
from openpyxl.styles import Border, Side

ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if ROOT_DIR not in sys.path:
    sys.path.append(ROOT_DIR)

from config import create_db_engine  # noqa: E402

# Business constants. None of them is referenced in this chunk of the file —
# presumably unit prices for fee categories 310/390 and a settlement
# adjustment factor; TODO confirm units and usage against the rest of the file.
PRICE310 = 14.66
PRICE390 = 10.76
FACTOR = 0.9


# Timing helpers
t0 = time.time()
def elapsed() -> str:
    """Return the current wall-clock time plus the time elapsed since start.

    Format: "HH:MM:SS (+ <elapsed>)", where <elapsed> is rendered with the
    largest applicable units: "S sec", "M min S sec", "H hour M min S sec",
    or "D day H hour M min S sec". Used as the timestamp in progress logs.
    """
    now_str = time.strftime("%H:%M:%S", time.localtime())
    total = int(time.time() - t0)
    # One divmod cascade instead of per-range branching; then pick the
    # widest non-zero unit to decide how much of it to print.
    days, rem = divmod(total, 86400)
    hours, rem = divmod(rem, 3600)
    minutes, seconds = divmod(rem, 60)
    if days:
        suffix = f"{days} day {hours} hour {minutes} min {seconds} sec"
    elif hours:
        suffix = f"{hours} hour {minutes} min {seconds} sec"
    elif minutes:
        suffix = f"{minutes} min {seconds} sec"
    else:
        suffix = f"{seconds} sec"
    return f"{now_str} (+ {suffix})"

# Hospital list
def get_hsp_list() -> list:
    """Load the list of hospitals to process from hspList.json (CWD-relative)."""
    with open('hspList.json', encoding='utf-8') as fh:
        return json.load(fh)

# ============
# 读入各种库文件
# ============
def load_clue_library() -> dict:
    """Read clue863_library.xlsx and map item names to ICD9 surgery codes.

    Returns:
        dict: ITEM_NAME -> list of its non-null TX_CODE values in file order
              (duplicate rows are kept, mirroring the library). Item names
              with no usable TX_CODE, and NaN item names, are omitted —
              matching the previous behavior.
    """
    file_path = fr'clue863_library.xlsx'
    df_clue = pd.read_excel(file_path, dtype=str)
    clue_dict: dict = {}
    # Single pass over the rows instead of re-filtering the whole DataFrame
    # once per distinct item name (previously O(rows * distinct_names)).
    for item_name, tx_code in zip(df_clue['ITEM_NAME'], df_clue['TX_CODE']):
        if pd.isna(item_name) or pd.isna(tx_code):
            # NaN names never matched the old equality filter and NaN codes
            # were dropped by dropna(); keep both exclusions.
            continue
        clue_dict.setdefault(item_name, []).append(tx_code)
    return clue_dict

def load_dip_library() -> pd.DataFrame:
    """Read the dip_library sheet of env.xlsx as an all-string DataFrame."""
    return pd.read_excel(fr'env.xlsx', sheet_name='dip_library', dtype=str)

def load_icd_name(engine: Engine, df_dip_library: pd.DataFrame) -> dict:
    """Build a code -> name dictionary for ICD10 diagnoses and ICD9 operations.

    Reads stable_icd10 / stable_icd9 from the database, then supplements the
    dictionary with patterns that only appear in df_dip_library.

    Args:
        engine: database engine.
        df_dip_library: dip_library sheet; must provide the columns
            'tx_pattern', 'tx_pattern_name' and '手术操作编码(国临3.0)'.

    Returns:
        dict: code or pattern string -> human-readable name.
    """
    sql_icd10 = text('select distinct dx, dxname from stable_icd10')
    sql_icd9 = text('select distinct tx, txname from stable_icd9')
    icd_name_dict = {}
    with engine.connect() as conn:
        result = conn.execute(sql_icd10)
        for row in result:
            mapping = row._mapping
            icd_name_dict[mapping['dx']] = mapping['dxname']
        result = conn.execute(sql_icd9)
        for row in result:
            mapping = row._mapping
            icd_name_dict[mapping['tx']] = mapping['txname']

    # Supplement icd_name_dict with tx_pattern names missing from the DB.
    # NOTE(review): membership is tested against the ORIGINAL-case pattern,
    # but the key is inserted lower-cased — a pattern whose lower-case form
    # is already present can still overwrite it; confirm this is intended.
    extra_dict_df = df_dip_library[~df_dip_library['tx_pattern'].isin(icd_name_dict.keys())][
        ['tx_pattern', 'tx_pattern_name']].drop_duplicates()
    extra_dict_df['tx_pattern'] = extra_dict_df['tx_pattern'].str.lower()
    extra_dict = extra_dict_df.set_index('tx_pattern')['tx_pattern_name'].to_dict()
    icd_name_dict.update(extra_dict)

    # Same supplement keyed by the national-standard 3.0 operation-code column.
    extra_dict2_df = df_dip_library[~df_dip_library['手术操作编码(国临3.0)'].isin(icd_name_dict.keys())][
        ['手术操作编码(国临3.0)', 'tx_pattern_name']].drop_duplicates()
    extra_dict2_df['手术操作编码(国临3.0)'] = extra_dict2_df['手术操作编码(国临3.0)'].str.lower()
    extra_dict2 = extra_dict2_df.set_index('手术操作编码(国临3.0)')['tx_pattern_name'].to_dict()
    icd_name_dict.update(extra_dict2)

    return icd_name_dict

# =================
# 查询数据库，获得信息
# =================
def fetch_an_data(engine: Engine, hsp_abbr: str) -> list[dict]:
    """Fetch all dip_case rows with tx_pattern = 'a(n)' for one hospital,
    joined to m for the settlement amount (gnr_c)."""
    query = text(f"""
        SELECT dip_case.setl_id, dip_code, dip_name, dip_score, top_oper_code, top_oper_name, p, m.gnr_c
        FROM dip_case, m
        WHERE tx_pattern = 'a(n)' and dip_case.hsp_abbr = :hsp_abbr
            AND dip_case.setl_id = m.setl_id
    """)
    records: list[dict] = []
    with engine.connect() as conn:
        for row in conn.execute(query, {"hsp_abbr": hsp_abbr}):
            records.append(dict(row._mapping))
    return records

def fetch_dx_dict(engine: Engine, hsp_abbr: str) -> dict:
    """Fetch the diagnoses of every 'a(n)' case at one hospital, then reduce
    each case's diagnosis list by ICD chapter priority.

    Args:
        engine: database engine.
        hsp_abbr: hospital abbreviation used to filter dip_case.

    Returns:
        dict: setl_id -> {'dx_codes': [code, ...],
                          'dx_list': [{'dx_code', 'dx_name'}, ...]}
        after the priority filtering described below.
    """
    sql = text(f"""
        with t0 as (select setl_id from dip_case dc where dc.tx_pattern='a(n)' and dc.hsp_abbr = :hsp_abbr)
        select t0.setl_id, dx.dx_code, dx.dx_name
        from dx, t0
        where t0.setl_id = dx.setl_id
    """)
    dx_info_dict = {}
    with engine.connect() as conn:
        result = conn.execute(sql, {"hsp_abbr": hsp_abbr})
        for row in result:
            mapping = row._mapping
            setl_id = mapping['setl_id']
            dx_code = mapping['dx_code']
            dx_name = mapping['dx_name']
            if setl_id not in dx_info_dict:
                dx_info_dict[setl_id] = {'dx_codes': [], 'dx_list': []}
            dx_info_dict[setl_id]['dx_codes'].append(dx_code)
            dx_info_dict[setl_id]['dx_list'].append({'dx_code': dx_code, 'dx_name': dx_name})
    # Reduce each case's diagnoses by ICD chapter priority (case-insensitive):
    # - Strong priority: if any O/P-prefixed diagnosis exists, keep only those.
    # - General priority: else if any A/B/C/F/Q/S/T-prefixed diagnosis exists,
    #   keep only those.
    # - Last resort: else if any non-R diagnosis exists, drop the R-prefixed
    #   ("symptoms, signs") ones. A case with only R diagnoses keeps them all.
    for setl_id, info in dx_info_dict.items():
        dx_codes = info['dx_codes']
        dx_list = info['dx_list']
        strong_priority_codes = [code for code in dx_codes if code and code.startswith(('o', 'p', 'O', 'P'))]
        general_priority_codes = [code for code in dx_codes if code and code.startswith(('a', 'b', 'c', 'f', 'q', 's', 't',
                                                                              'A', 'B', 'C', 'F', 'Q', 'S', 'T'))]
        last_priority_codes = [code for code in dx_codes if code and not code.startswith(('r', 'R'))]
        if strong_priority_codes:
            dx_info_dict[setl_id]['dx_codes'] = strong_priority_codes
            dx_info_dict[setl_id]['dx_list'] = [dx for dx in dx_list if dx['dx_code'] in strong_priority_codes]
        elif general_priority_codes:
            dx_info_dict[setl_id]['dx_codes'] = general_priority_codes
            dx_info_dict[setl_id]['dx_list'] = [dx for dx in dx_list if dx['dx_code'] in general_priority_codes]
        elif last_priority_codes:
            dx_info_dict[setl_id]['dx_codes'] = last_priority_codes
            dx_info_dict[setl_id]['dx_list'] = [dx for dx in dx_list if dx['dx_code'] in last_priority_codes]
    return dx_info_dict

def fetch_noninvasive_vent_set(engine: Engine, hsp_abbr: str) -> set:
    """Return the setl_ids of 'a(n)' cases that billed a non-invasive
    ventilator item (in-scope charges only)."""
    sql = text(f"""
        select distinct d.setl_id
        from d
        where d.setl_id in (select setl_id from dip_case dc where dc.tx_pattern='a(n)' and dc.hsp_abbr = :hsp_abbr)
            and d.j_isin = '1'
            and d.item_std_code = '003106030020000-310603002'
    """)
    with engine.connect() as conn:
        rows = conn.execute(sql, {"hsp_abbr": hsp_abbr})
        return {row._mapping['setl_id'] for row in rows}

def fetch_invasive_vent_dict(engine: Engine, hsp_abbr: str) -> dict:
    """Return setl_id -> total invasive-ventilator quantity (hours) for the
    'a(n)' cases of one hospital; only positive totals are included."""
    sql = text(f"""
        select d.setl_id, sum(d.q) as sum_q
        from d
        where d.setl_id in (select setl_id from dip_case dc where dc.tx_pattern='a(n)' and dc.hsp_abbr = :hsp_abbr)
            and d.j_isin = '1'
            and d.item_std_code = '003106030010000-310603001'
        group by d.setl_id
        having sum(d.q)>0
    """)
    with engine.connect() as conn:
        rows = conn.execute(sql, {"hsp_abbr": hsp_abbr})
        return {row._mapping['setl_id']: row._mapping['sum_q'] for row in rows}

# =================
# 待分组数据清洗及上传
# =================
def wash_data(
        data_an0: list[dict],
        dx_info_dict: dict,
        noninvasive_vent_set: set, invasive_vent_dict: dict
    ) -> list[dict]:
    """Enrich each a(n) case with ventilator ICD9 codes and its diagnosis pool.

    Args:
        data_an0: cases with tx_pattern = 'a(n)' (from fetch_an_data).
        dx_info_dict: setl_id -> {'dx_codes': [...], 'dx_list': [...]}.
        noninvasive_vent_set: setl_ids that used a non-invasive ventilator.
        invasive_vent_dict: setl_id -> invasive ventilation hours.

    Returns:
        A new list of record dicts, each carrying 'tx_codes' (fixed surgery
        pool) and 'dx_codes' (floating diagnosis pool). The caller's records
        are left untouched.
    """
    washed_data = []
    for record0 in data_an0:
        # Bug fix: list.copy() is shallow, so the previous version still
        # mutated the caller's record dicts in place; copy each record.
        record = dict(record0)
        setl_id = record['setl_id']
        tx_codes = []
        dx_codes = dx_info_dict.get(setl_id, {}).get('dx_codes', [])

        # Ventilator usage -> fixed surgery pool:
        #   invasive >= 96 h -> 96.7201; invasive < 96 h -> 96.7101;
        #   non-invasive only -> 93.9000.
        if setl_id in invasive_vent_dict:
            hours = invasive_vent_dict[setl_id]
            if hours >= 96:
                tx_codes.append('96.7201')
            else:
                tx_codes.append('96.7101')
        elif setl_id in noninvasive_vent_set:
            tx_codes.append('93.9000')

        record['tx_codes'] = tx_codes
        record['dx_codes'] = dx_codes
        washed_data.append(record)
    return washed_data

def wash_data_float(washed_data0: list[dict], clue_dict: dict) -> list[dict]:
    """Attach the floating surgery pool derived from the top-priced operation.

    Looks up each record's top_oper_name in clue_dict and stores the mapped
    ICD9 codes in record['tx_float_codes'], excluding codes already in the
    fixed pool (record['tx_codes']) and de-duplicating the result.

    Args:
        washed_data0: records produced by wash_data.
        clue_dict: ITEM_NAME -> list of candidate ICD9 codes
            (from load_clue_library).

    Returns:
        A new list of record dicts with 'tx_float_codes' added; the caller's
        records are left untouched.
    """
    washed_data = []
    for record0 in washed_data0:
        record = dict(record0)  # don't mutate the caller's dicts

        top_oper_name = record['top_oper_name']
        # None or NaN (NaN != NaN) means "no top operation name".
        if top_oper_name is None or top_oper_name != top_oper_name:
            top_oper_name = ''
        else:
            top_oper_name = top_oper_name.strip()

        # Bug fix: the previous version compared each candidate against an
        # always-empty local list, so duplicates and codes already in the
        # fixed pool were never filtered out.
        fixed_codes = record.get('tx_codes', [])
        tx_float_codes: list = []
        for code in clue_dict.get(top_oper_name, []):
            if code not in fixed_codes and code not in tx_float_codes:
                tx_float_codes.append(code)

        record['tx_float_codes'] = tx_float_codes
        washed_data.append(record)
    return washed_data

def upload_washed_data(
        engine: Engine, hsp_abbr: str,
        washed_data: list[dict]
    ) -> None:
    """Build one virtual case per (dx_code, tx_float_code) pair and upload
    them to the clue863tmp_m / clue863tmp_dx / clue863tmp_tx staging tables.

    Each virtual case gets setl_id '<base>_<nnnn>', carries the floating
    diagnosis as main dx, the floating surgery as main tx, and every fixed
    tx_code as a secondary operation. Buffers are flushed every 5000 source
    records (and at the end) to bound memory use; indexes are created last.

    NOTE(review): a record whose dx_codes OR tx_float_codes list is empty
    contributes no virtual cases at all — confirm that is intended.

    Args:
        engine: database engine.
        hsp_abbr: hospital abbreviation (used for progress logging only).
        washed_data: output of wash_data + wash_data_float.
    """
    # Database setup
    with engine.begin() as conn:
        # Try to drop any pre-existing staging tables. Oracle has no
        # DROP TABLE IF EXISTS, so just attempt the drop and log failures.
        for table_name in ['clue863tmp_m', 'clue863tmp_dx', 'clue863tmp_tx']:
            try:
                conn.execute(text(f"DROP TABLE {table_name}"))
            except Exception as e:
                print(f"[{elapsed()}] Warning: could not drop table {table_name}: {e}")
        # Create the staging tables
        conn.execute(text("""
            CREATE TABLE clue863tmp_m (
                setl_id VARCHAR2(100) PRIMARY KEY,
                dx_float_code VARCHAR2(100),
                tx_float_code VARCHAR2(100),
                setl_id_base VARCHAR2(50),
                gnr_c NUMBER
            )
        """))
        conn.execute(text("""
            CREATE TABLE clue863tmp_dx (
                setl_id VARCHAR2(100),
                dx_code VARCHAR2(100),
                is_main VARCHAR2(1)
            )
        """))
        conn.execute(text("""
            CREATE TABLE clue863tmp_tx (
                setl_id VARCHAR2(100),
                tx_code VARCHAR2(100),
                is_main VARCHAR2(1)
            )
        """))

    clue863tmp_m_data = []
    clue863tmp_dx_data = []
    clue863tmp_tx_data = []
    washed_data_len = len(washed_data)
    washed_data_ord = 0
    for washed_datum in washed_data:
        # Base setl_id from which the virtual case ids are derived
        setl_id_base = washed_datum['setl_id']
        # Cartesian product of dx_codes x tx_float_codes: one virtual case each
        float_cases_ord = 0
        for dx_float_code in washed_datum.get('dx_codes', []):
            for tx_float_code in washed_datum.get('tx_float_codes', []):
                setl_id = f"{setl_id_base}_{float_cases_ord:04d}"
                clue863tmp_m_data.append({
                    'setl_id': setl_id,
                    'dx_float_code': dx_float_code,
                    'tx_float_code': tx_float_code,
                    'setl_id_base': setl_id_base,
                    'gnr_c': washed_datum.get('gnr_c', 0),
                })
                clue863tmp_dx_data.append({
                    'setl_id': setl_id,
                    'dx_code': dx_float_code,
                    'is_main': '1',
                })
                clue863tmp_tx_data.append({
                    'setl_id': setl_id,
                    'tx_code': tx_float_code,
                    'is_main': '1',
                })
                # Fixed surgery pool codes ride along as secondary operations
                for tx_code in washed_datum.get('tx_codes', []):
                    clue863tmp_tx_data.append({
                        'setl_id': setl_id,
                        'tx_code': tx_code,
                        'is_main': '0',
                    })
                float_cases_ord += 1
        washed_data_ord += 1

        # Flush periodically (and on the last record) to keep memory bounded
        if washed_data_ord % 5000 == 0 or washed_data_ord == washed_data_len:
            print(f"[{elapsed()}] Preparing to upload clue863tmp data for hospital: {hsp_abbr}, processed {washed_data_ord}/{washed_data_len} records.")
            # Database upload
            with engine.begin() as conn:
                # Bulk-insert the buffered rows (executemany via param list)
                if clue863tmp_m_data:
                    conn.execute(
                        text("""
                            INSERT INTO clue863tmp_m (setl_id, dx_float_code, tx_float_code, setl_id_base, gnr_c)
                            VALUES (:setl_id, :dx_float_code, :tx_float_code, :setl_id_base, :gnr_c)
                        """),
                        clue863tmp_m_data
                    )
                    print(f"[{elapsed()}] Uploaded clue863tmp_m data for hospital: {hsp_abbr}")
                if clue863tmp_dx_data:
                    conn.execute(
                        text("""
                            INSERT INTO clue863tmp_dx (setl_id, dx_code, is_main)
                            VALUES (:setl_id, :dx_code, :is_main)
                        """),
                        clue863tmp_dx_data
                    )
                    print(f"[{elapsed()}] Uploaded clue863tmp_dx data for hospital: {hsp_abbr}")
                if clue863tmp_tx_data:
                    conn.execute(
                        text("""
                            INSERT INTO clue863tmp_tx (setl_id, tx_code, is_main)
                            VALUES (:setl_id, :tx_code, :is_main)
                        """),
                        clue863tmp_tx_data
                    )
                    print(f"[{elapsed()}] Uploaded clue863tmp_tx data for hospital: {hsp_abbr}")        
                # Explicit commit — presumably redundant under engine.begin(),
                # which commits on clean exit; kept as-is.
                conn.commit()
            # Reset the buffers after a successful flush
            clue863tmp_m_data = []
            clue863tmp_dx_data = []
            clue863tmp_tx_data = []

    # Finally, create the indexes
    with engine.begin() as conn:
        # Add indexes used by the later grouping joins
        conn.execute(text("CREATE INDEX idx_clue863tmp_m_setl_id_base ON clue863tmp_m(setl_id_base)"))
        conn.execute(text("CREATE INDEX idx_clue863tmp_dx_setl_id ON clue863tmp_dx(setl_id)"))
        conn.execute(text("CREATE INDEX idx_clue863tmp_tx_setl_id ON clue863tmp_tx(setl_id)"))
        conn.execute(text("CREATE INDEX idx_clue863tmp_tx_tx_code ON clue863tmp_tx(tx_code)"))
        conn.execute(text("CREATE INDEX idx_clue863tmp_dx_dx_code ON clue863tmp_dx(dx_code)"))
        conn.commit()
        print(f"[{elapsed()}] Created indexes on clue863tmp tables for hospital: {hsp_abbr}")

# ============
# 分组
# ============

def normalize_pattern(value: object) -> str:
    """Normalize pattern strings for consistent matching logic.

    None, NaN, blank strings and the literal string 'nan' all collapse to
    the catch-all pattern 'a(n)'; any other value is stringified with all
    spaces removed.
    """
    missing = value is None or (isinstance(value, float) and pd.isna(value))
    if not missing:
        stripped = str(value).strip()
        if stripped and stripped.lower() != 'nan':
            return stripped.replace(' ', '')
    return 'a(n)'


def escape_sql_literal(value: str) -> str:
    """Escape single quotes for embedding a value in a SQL string literal.

    Non-string values are returned unchanged (callers interpolate them
    as-is).
    """
    # isinstance instead of type(...) == str: idiomatic and also accepts
    # str subclasses.
    if isinstance(value, str):
        return value.replace("'", "''")
    return value


def split_pattern(pattern: str, delimiter: str) -> list[str]:
    """Split a pattern string on delimiter, strip each piece, drop empties."""
    pieces = (piece.strip() for piece in pattern.split(delimiter))
    return [piece for piece in pieces if piece]


def to_sql_string(value: object) -> str:
    """Convert a value to SQL-safe literal content ('' for None/NaN)."""
    missing = value is None or (isinstance(value, float) and pd.isna(value))
    return '' if missing else escape_sql_literal(str(value))


def build_dx_pattern_select(pattern: str) -> str | None:
    """Generate SQL to select setl_ids that satisfy a dx pattern.

    Pattern forms:
        'a(n)'     -> no dx constraint (returns None)
        'x/y/...'  -> any-of (OR)
        'x+y+...'  -> all-of (GROUP BY / HAVING)
        'x'        -> single prefix match
    Tokens of length >= 6 are treated as full codes (equality); shorter
    tokens are prefix-matched with LIKE.
    """
    if pattern == 'a(n)':
        return None

    def token_condition(token: str) -> str:
        # Full-code equality for long tokens, prefix LIKE otherwise.
        if len(token) >= 6:
            return f"dx.dx_code = '{escape_sql_literal(token)}'"
        return f"dx.dx_code LIKE '{escape_sql_literal(token)}%'"

    if '/' in pattern:
        tokens = split_pattern(pattern, '/')
        if not tokens:
            return None
        conditions = " OR ".join(token_condition(token) for token in tokens)
        return (
            "SELECT DISTINCT dx.setl_id\n"
            "FROM clue863tmp_dx dx\n"
            "WHERE dx.is_main = 1 AND (" + conditions + ")"
        )

    if '+' in pattern:
        tokens = split_pattern(pattern, '+')
        if not tokens:
            return None
        like_clause = " OR ".join(token_condition(token) for token in tokens)
        # Bug fix: the HAVING clause previously required exact equality for
        # every token, so short prefix tokens (matched via LIKE in the WHERE
        # clause) could never satisfy it and '+' patterns returned no rows.
        # Each HAVING term now reuses the same per-token condition as WHERE,
        # mirroring the tx counterpart.
        having_clause = " AND ".join(
            f"SUM(CASE WHEN {token_condition(token)} THEN 1 ELSE 0 END) > 0"
            for token in tokens
        )
        return (
            "SELECT dx.setl_id\n"
            "FROM clue863tmp_dx dx\n"
            "WHERE dx.is_main = 1 AND (" + like_clause + ")\n"
            "GROUP BY dx.setl_id\n"
            "HAVING " + having_clause
        )

    return (
        "SELECT DISTINCT dx.setl_id\n"
        "FROM clue863tmp_dx dx\n"
        f"WHERE dx.is_main = 1 AND dx.dx_code LIKE '{escape_sql_literal(pattern)}%'"
    )


def build_tx_pattern_select(pattern: str) -> str | None:
    """Generate SQL to select setl_ids that satisfy a tx pattern.

    Pattern forms:
        'a(n)'                     -> no tx constraint (returns None)
        class labels (一类手术操作 …) -> join against dip_tx_class
        'x/y/...'                  -> any-of (OR)
        'x+y+...'                  -> all-of (GROUP BY / HAVING)
        'x'                        -> single prefix match
    Tokens of length >= 5 are treated as full codes (equality); shorter
    tokens are prefix-matched with LIKE.
    """
    if pattern == 'a(n)':
        return None

    # Map the library's operation-class labels to dip_tx_class.tx_class values.
    tx_class_map = {
        '一类手术操作': '一类',
        '二类手术操作': '二类',
        '三类手术操作': '三类',
    }

    if pattern in tx_class_map:
        tx_class = escape_sql_literal(tx_class_map[pattern])
        return (
            "SELECT DISTINCT tx.setl_id\n"
            "FROM clue863tmp_tx tx\n"
            "JOIN dip_tx_class dtx ON tx.tx_code = dtx.tx_code\n"
            f"WHERE dtx.tx_class = '{tx_class}'"
        )

    if '/' in pattern:
        tokens = split_pattern(pattern, '/')
        if not tokens:
            return None
        # Tokens of length >= 5 match the full code; shorter ones are prefixes.
        conditions = " OR ".join(
            [f"tx.tx_code = '{escape_sql_literal(token)}'" if len(token) >= 5 else f"tx.tx_code LIKE '{escape_sql_literal(token)}%'" for token in tokens]
        )
        # Bug fix: the FROM clause previously read "clue863tmp_tx tx tx"
        # (duplicated alias), which is invalid Oracle SQL and made every
        # '/'-pattern INSERT ... SELECT fail.
        return (
            "SELECT DISTINCT tx.setl_id\n"
            "FROM clue863tmp_tx tx\n"
            "WHERE (" + conditions + ")"
        )

    if '+' in pattern:
        tokens = split_pattern(pattern, '+')
        if not tokens:
            return None
        # Tokens of length >= 5 match the full code; shorter ones are prefixes.
        like_clause = " OR ".join(
            [f"tx.tx_code = '{escape_sql_literal(token)}'" if len(token) >= 5 else f"tx.tx_code LIKE '{escape_sql_literal(token)}%'" for token in tokens]
        )
        having_clause = " AND ".join(
            [
                f"SUM(CASE WHEN tx.tx_code LIKE '{escape_sql_literal(token)}%' THEN 1 ELSE 0 END) > 0"
                for token in tokens
            ]
        )
        return (
            "SELECT tx.setl_id\n"
            "FROM clue863tmp_tx tx\n"
            "WHERE (" + like_clause + ")\n"
            "GROUP BY tx.setl_id\n"
            "HAVING " + having_clause
        )

    return (
        "SELECT DISTINCT tx.setl_id\n"
        "FROM clue863tmp_tx tx\n"
        f"WHERE tx.tx_code LIKE '{escape_sql_literal(pattern)}%'"
    )


def prepare_pattern_cases_table(
    engine: Engine,
    patterns: list[str],
    table_name: str,
    select_builder: Callable[[str], str | None],
) -> None:
    """Populate a pattern-case mapping table for dx or tx patterns.

    For each pattern, runs the SELECT produced by select_builder and inserts
    the matching setl_ids into table_name as (pattern, setl_id) rows, then
    indexes the table. The catch-all 'a(n)' pattern is filtered out.

    Args:
        engine: database engine.
        patterns: normalized pattern strings (may include 'a(n)').
        table_name: target mapping table; dropped and recreated.
        select_builder: build_dx_pattern_select or build_tx_pattern_select.
    """
    patterns = [pattern for pattern in patterns if pattern and pattern != 'a(n)']
    if not patterns:
        return

    # Oracle anonymous block: drop the table if it exists, ignore otherwise.
    drop_sql = f"BEGIN EXECUTE IMMEDIATE 'DROP TABLE {table_name}'; EXCEPTION WHEN OTHERS THEN NULL; END;"
    create_sql = (
        f"CREATE TABLE {table_name} (\n"
        "    pattern VARCHAR(200),\n"
        "    setl_id VARCHAR(200)\n"
        ")"
    )

    with engine.connect() as conn:
        conn.execute(text(drop_sql))
        conn.execute(text(create_sql))
        conn.commit()

    total_rows = 0
    total_patterns = len(patterns)
    for idx, pattern in enumerate(patterns, start=1):
        select_sql = select_builder(pattern)
        if not select_sql:
            # Builder returned None: nothing selectable for this pattern.
            continue
        insert_sql = (
            f"INSERT INTO {table_name} (pattern, setl_id)\n"
            "SELECT :pattern AS pattern, setl_id\n"
            f"FROM (\n{select_sql}\n)"
        )
        with engine.begin() as conn:
            result = conn.execute(text(insert_sql), {"pattern": pattern})
            # Some drivers report rowcount as -1/None; treat that as 0.
            inserted = result.rowcount if result.rowcount and result.rowcount > 0 else 0
            total_rows += inserted
        # '\r' with end='' keeps the progress on one console line.
        print(
            f"\r[{elapsed()}] Prepared pattern cases {idx}/{total_patterns}, "
            f"{total_rows} rows, pattern {pattern}.",
            end=''
        )
    print()

    index_prefix = f"IDX_{table_name.upper()}"
    with engine.connect() as conn:
        conn.execute(text(f"CREATE INDEX {index_prefix}_PATTERN ON {table_name}(pattern, setl_id)"))
        conn.execute(text(f"CREATE INDEX {index_prefix}_SETL ON {table_name}(setl_id)"))
        conn.commit()
        print(f"[{elapsed()}] Created indexes on {table_name}.")


def prepare_dx_pattern_cases(engine: Engine, dip_library_df: pd.DataFrame, table_name: str) -> None:
    """Build the dx pattern -> setl_id mapping table from dip_library patterns."""
    unique_patterns = {normalize_pattern(raw) for raw in dip_library_df['dx_pattern']}
    prepare_pattern_cases_table(engine, sorted(unique_patterns), table_name, build_dx_pattern_select)


def prepare_tx_pattern_cases(engine: Engine, dip_library_df: pd.DataFrame, table_name: str) -> None:
    """Build the tx pattern -> setl_id mapping table from dip_library patterns."""
    unique_patterns = {normalize_pattern(raw) for raw in dip_library_df['tx_pattern']}
    prepare_pattern_cases_table(engine, sorted(unique_patterns), table_name, build_tx_pattern_select)

def create_tmp_dip_table(engine: Engine, table_name: str) -> None:
    """Create the clue863 temporary dip table (all grouping candidates).

    Any existing table of the same name is dropped first via an Oracle
    anonymous PL/SQL block (Oracle has no DROP TABLE IF EXISTS).

    Args:
        engine (Engine): database engine
        table_name (str): name of the dip table to (re)create
    """
    with engine.connect() as conn:
        # Drop the table if it exists, using an Oracle anonymous block.
        conn.execute(text(f"BEGIN EXECUTE IMMEDIATE 'DROP TABLE {table_name}'; EXCEPTION WHEN OTHERS THEN NULL; END;"))
        # Create the new table
        create_table_sql = f"""
            CREATE TABLE {table_name} (
                hsp_abbr VARCHAR(200),
                setl_id VARCHAR(200),
                dip_code VARCHAR(200),
                dip_name VARCHAR(1000),
                dx_pattern VARCHAR(200),
                dx_pattern_name VARCHAR(1000),
                tx_pattern VARCHAR(200),
                tx_pattern_name VARCHAR(1000),
                dip_group_type VARCHAR(200),
                dip_score NUMBER,
                dip_dx_type VARCHAR(200),
                dip_tx_type VARCHAR(200),
                dip_rank_type NUMBER,
                dip_priority NUMBER
            )
        """
        conn.execute(text(create_table_sql))
        conn.commit()
        print(f"[{elapsed()}]Table {table_name} created.")

def _dip_single(
    engine: Engine,
    datum: pd.Series,
    table_name: str,
    dx_cases_table: str,
    tx_cases_table: str,
) -> tuple[int, str]:
    """Insert the virtual cases matching ONE dip_library row into table_name.

    Joins clue863tmp_m against the pre-computed dx/tx pattern-case tables;
    the join for a pattern is skipped when it is the catch-all 'a(n)'.
    All group attributes are interpolated as SQL literals (escaped through
    to_sql_string / escape_sql_literal).

    Returns:
        (rows inserted, dip_code string for progress display).
    """

    dip_code_display = '' if pd.isna(datum['dip_code']) else str(datum['dip_code'])
    dip_code = to_sql_string(datum['dip_code'])
    dip_name = to_sql_string(datum['dip_name'])

    # Normalize dx_pattern: None / NaN / blank all mean the catch-all 'a(n)'.
    raw_dx_pattern = datum['dx_pattern']
    if raw_dx_pattern is None or (isinstance(raw_dx_pattern, float) and pd.isna(raw_dx_pattern)):
        raw_dx_pattern = 'a(n)'
    else:
        raw_dx_pattern = str(raw_dx_pattern).strip() or 'a(n)'
    dx_pattern_key = normalize_pattern(raw_dx_pattern)
    dx_pattern_display = to_sql_string(raw_dx_pattern)
    dx_pattern_name = to_sql_string(datum['dx_pattern_name'])

    # Same normalization for tx_pattern.
    raw_tx_pattern = datum['tx_pattern']
    if raw_tx_pattern is None or (isinstance(raw_tx_pattern, float) and pd.isna(raw_tx_pattern)):
        raw_tx_pattern = 'a(n)'
    else:
        raw_tx_pattern = str(raw_tx_pattern).strip() or 'a(n)'
    tx_pattern_key = normalize_pattern(raw_tx_pattern)
    tx_pattern_display = to_sql_string(raw_tx_pattern)
    tx_pattern_name = to_sql_string(datum['tx_pattern_name'])

    dip_group_type = to_sql_string(datum['dip_group_type'])
    dip_dx_type = to_sql_string(datum['dip_dx_type'])
    dip_tx_type = to_sql_string(datum['dip_tx_type'])

    # Numeric columns: emit SQL NULL when the source value is NaN.
    dip_score = datum['dip_score']
    dip_rank_type = datum['dip_rank_type']
    dip_score_sql = 'NULL' if pd.isna(dip_score) else str(dip_score)
    dip_rank_type_sql = 'NULL' if pd.isna(dip_rank_type) else str(dip_rank_type)

    # Only constrain on patterns that are not the catch-all 'a(n)'.
    join_clauses: list[str] = []
    if dx_pattern_key != 'a(n)':
        join_clauses.append(
            f"JOIN {dx_cases_table} dxp ON dxp.setl_id = m.setl_id AND dxp.pattern = '{escape_sql_literal(dx_pattern_key)}'"
        )
    if tx_pattern_key != 'a(n)':
        join_clauses.append(
            f"JOIN {tx_cases_table} txp ON txp.setl_id = m.setl_id AND txp.pattern = '{escape_sql_literal(tx_pattern_key)}'"
        )
    joins_sql = ("\n" + "\n".join(join_clauses)) if join_clauses else ''

    sql = f"""
        INSERT INTO {table_name} (
            setl_id, dip_code, dip_name, dx_pattern, dx_pattern_name, tx_pattern, tx_pattern_name,
            dip_group_type, dip_score, dip_dx_type, dip_tx_type, dip_rank_type
        )
        SELECT
            m.setl_id,
            '{dip_code}', '{dip_name}', '{dx_pattern_display}', '{dx_pattern_name}', '{tx_pattern_display}', '{tx_pattern_name}',
            '{dip_group_type}', {dip_score_sql}, '{dip_dx_type}', '{dip_tx_type}', {dip_rank_type_sql}
        FROM
             clue863tmp_m m{joins_sql}
    """

    with engine.begin() as conn:
        result = conn.execute(text(sql))
        # rowcount can be -1/None on some drivers; normalize to 0.
        rowcount = result.rowcount if result.rowcount and result.rowcount > 0 else 0

    return rowcount, dip_code_display

def dip_all(
    engine: Engine,
    dip_library_df0: pd.DataFrame,
    table_name: str,
    dx_cases_table: str,
    tx_cases_table: str,
) -> None:
    """Run dip grouping over every applicable dip_library row.

    Args:
        engine (Engine): database engine
        dip_library_df0 (pd.DataFrame): dipLibrary data
        table_name (str): target dip table name
        dx_cases_table (str): dx pattern -> setl_id mapping table
        tx_cases_table (str): tx pattern -> setl_id mapping table
    """
    # Skip library rows whose dx_pattern / tx_pattern is absent from the
    # pattern-case tables — their joins could never match any virtual case.
    dip_library_df = dip_library_df0.copy()
    sql_tx_pattern = text(f"SELECT DISTINCT pattern FROM {tx_cases_table}")
    sql_dx_pattern = text(f"SELECT DISTINCT pattern FROM {dx_cases_table}")
    dx_patterns_in_db = set()
    tx_patterns_in_db = set()
    with engine.connect() as conn:
        result = conn.execute(sql_dx_pattern)
        for row in result:
            dx_patterns_in_db.add(row._mapping['pattern'])
        result = conn.execute(sql_tx_pattern)
        for row in result:
            tx_patterns_in_db.add(row._mapping['pattern'])
    dip_library_df = dip_library_df[
        dip_library_df['dx_pattern'].apply(lambda x: normalize_pattern(x) in dx_patterns_in_db) &
        dip_library_df['tx_pattern'].apply(lambda x: normalize_pattern(x) in tx_patterns_in_db)
    ].reset_index(drop=True)

    # One INSERT ... SELECT per remaining library row, with inline progress.
    total_jobs = len(dip_library_df)
    total_rows = 0
    for idx, row in dip_library_df.iterrows():
        rows, dip_code = _dip_single(engine, row, table_name, dx_cases_table, tx_cases_table)
        total_rows += rows
        print(f"\r[{elapsed()}] {idx + 1}/{total_jobs}, {total_rows} rows, {dip_code}.                                 ", end='')

def add_priority_column(engine: Engine, table_name: str) -> None:
    """Fill the dip_priority column of the dip table.

    Args:
        engine (Engine): database engine
        table_name (str): dip table name
    """
    with engine.connect() as conn:
        # Rank each setl_id's candidate groups with ROW_NUMBER ordered by
        # dip_rank_type DESC then dip_score DESC; priority 1 = best group.
        update_sql = f"""
            MERGE INTO {table_name} target
            USING (
                SELECT setl_id, dip_code, 
                       ROW_NUMBER() OVER (PARTITION BY setl_id ORDER BY dip_rank_type DESC, dip_score DESC) AS priority
                FROM {table_name}
            ) src
            ON (target.setl_id = src.setl_id AND target.dip_code = src.dip_code)
            WHEN MATCHED THEN
            UPDATE SET target.dip_priority = src.priority
        """
        conn.execute(text(update_sql))
        conn.commit()
        print(f"[{elapsed()}] Added priority column to {table_name}.")

def create_indexes(engine: Engine, table_name: str) -> None:
    """Create supporting indexes on the dip table.

    NOTE(review): the index names are hard-coded (idx_clue863_*) while the
    table name is a parameter — calling this for a second table in the same
    schema would fail on duplicate index names; confirm it runs once per job.

    Args:
        engine (Engine): database engine
        table_name (str): dip table name
    """
    with engine.connect() as conn:
        # conn.execute(text(f"CREATE INDEX idx_dip_setl_id ON {table_name}(setl_id)"))
        conn.execute(text(f"CREATE INDEX idx_clue863_dip_rank_type ON {table_name}(dip_rank_type)"))
        conn.execute(text(f"CREATE INDEX idx_clue863_dip_score ON {table_name}(dip_score)"))
        conn.commit()
        print(f"[{elapsed()}] Created indexes on {table_name}.")

def create_tmp_dip_case_table(engine: Engine, table_name: str, table_source_name: str) -> None:
    """Create the clue863_dip_case table holding the best simulated grouping.

    Drops any existing table of the same name (via an Oracle PL/SQL block that
    swallows ORA errors when the table is absent), recreates it, and copies in
    the rows from the source dip table whose dip_priority is 1, i.e. the best
    candidate group for each simulated settlement id.

    Args:
        engine (Engine): database engine
        table_name (str): dip_case table name to create
        table_source_name (str): fully-grouped dip table to copy priority-1 rows from
    """
    with engine.connect() as conn:
        # Drop the table if it exists, using an Oracle anonymous block so a
        # missing table does not raise.
        conn.execute(text(f"BEGIN EXECUTE IMMEDIATE 'DROP TABLE {table_name}'; EXCEPTION WHEN OTHERS THEN NULL; END;"))
        # Create the target table.
        create_table_sql = f"""
            CREATE TABLE {table_name} (
                hsp_abbr VARCHAR2(200),
                setl_id VARCHAR2(200) PRIMARY KEY,
                dip_code VARCHAR2(200),
                dip_name VARCHAR2(1000),
                dx_pattern VARCHAR2(200),
                dx_pattern_name VARCHAR2(1000),
                tx_pattern VARCHAR2(200),
                tx_pattern_name VARCHAR2(1000),
                dip_group_type VARCHAR2(200),
                dip_dx_type VARCHAR2(200),
                dip_tx_type VARCHAR2(200),
                dip_rank_type NUMBER,
                dip_score NUMBER,
                top_oper_code VARCHAR2(200),
                top_oper_name VARCHAR2(1000),
                p NUMBER,
                is_rescue VARCHAR2(1)
            )
        """
        conn.execute(text(create_table_sql))
        conn.commit()
        # Copy in only the rows with priority 1 (the winning group per setl_id).
        insert_sql = f"""
            INSERT INTO {table_name} (
                hsp_abbr, setl_id, dip_code, dip_name, 
                dx_pattern, dx_pattern_name, tx_pattern, tx_pattern_name,
                dip_group_type, dip_dx_type, dip_tx_type, dip_rank_type,
                dip_score
            )
            SELECT hsp_abbr, setl_id, dip_code, dip_name, dx_pattern, dx_pattern_name, tx_pattern, tx_pattern_name,
                   dip_group_type, dip_dx_type, dip_tx_type, dip_rank_type, dip_score
            FROM {table_source_name} dip
            WHERE dip_priority = 1
        """
        conn.execute(text(insert_sql))
        conn.commit()
        # Fix: added the missing space after the elapsed-time bracket so the
        # log line matches every other progress message in this file.
        print(f"[{elapsed()}] Table {table_name} created.")

# ============
# 分析模拟分组结果
# ============
def _fetch_tmp_dip_case(
    engine: Engine,
    table_name_of_dip_case: str = 'clue863_dip_case',
    table_name_of_m: str = 'clue863tmp_m'
) -> pd.DataFrame:
    """Fetch simulated best-group rows joined with the base case context.

    Joins the simulated dip_case table with the simulation master table
    (``table_name_of_m``), the original master table ``m`` and the original
    ``dip_case`` table, plus two LISTAGG subqueries that aggregate the base
    settlement's diagnosis (``dx``) and operation (``tx``) codes/names into
    comma-separated strings.

    NOTE(review): the query mixes Oracle comma-style joins with ANSI LEFT
    OUTER JOINs in one FROM clause — presumably valid under Oracle's mixed
    join resolution; verify before porting to another database.

    Args:
        engine (Engine): database engine
        table_name_of_dip_case (str): simulated best-group table name
        table_name_of_m (str): simulation master table name

    Returns:
        pd.DataFrame: one row per simulated settlement id carrying the
        simulated grouping, insutype, key operation, and the base case's
        aggregated diagnosis/operation code and name strings.
    """
    query = text(f"""
        select 
            m.setl_id_base, dc.setl_id, dc.dip_code, dc.dip_name, dc.dip_score, 
            m.dx_float_code, m.tx_float_code, m0.insutype, 
            dc0.TOP_OPER_CODE, dc0.TOP_OPER_NAME,
            dxs.dxmaincode, dxs.dxmainname, dxs.dxcodeall, dxs.dxnameall,
            txs.txmaincode, txs.txmainname, txs.txcodeall, txs.txnameall
        from {table_name_of_dip_case} dc, m m0, dip_case dc0, {table_name_of_m} m
            LEFT OUTER JOIN (
                SELECT
                setl_id,
                listagg(distinct case when dx.is_main=1 then dx.dx_code else null end,',') within group(order by dx.dx_code) as dxmaincode,
                listagg(distinct case when dx.is_main=1 then dx.dx_hsp_name else null end,',') within group(order by dx.dx_code) as dxmainname,
                listagg(distinct dx.dx_code,',') within group(order by dx.dx_code) as dxcodeall,
                listagg(distinct dx.dx_hsp_name,',') within group(order by dx.dx_hsp_name) as dxnameall
            FROM dx
            GROUP BY dx.setl_id
            ) dxs on m.setl_id_base = dxs.setl_id
            LEFT OUTER JOIN (
                SELECT
                setl_id,
                listagg(distinct case when tx.is_main=1 then tx.tx_code else null end,',') within group(order by tx.tx_code) as txmaincode,
                listagg(distinct case when tx.is_main=1 then tx.tx_hsp_name else null end,',') within group(order by tx.tx_code) as txmainname,
                listagg(distinct tx.tx_code,',') within group(order by tx.tx_code) as txcodeall,
                listagg(distinct tx.tx_hsp_name,',') within group(order by tx.tx_hsp_name) as txnameall
            FROM tx
            GROUP BY tx.setl_id
            ) txs ON m.setl_id_base = txs.setl_id
        where 1=1
            and dc.setl_id = m.setl_id
            and m.setl_id_base = m0.setl_id
            and m.setl_id_base = dc0.setl_id
    """)
    with engine.connect() as conn:
        result = conn.execute(query)
        # Materialise rows as dicts so the DataFrame keeps the column names.
        data = [dict(row._mapping) for row in result]
    df = pd.DataFrame(data)
    return df

def _cal_dip_amount(
        dip_score: float, 
        gnr_c: float, price310: float, price390: float, 
        factor: float, insutype: str, is_strict: bool = True
    ) -> float:
    """计算 dip 金额。"""
    price = ( price310 if insutype == '310' else price390 ) * factor
    avg_c = dip_score * price
    if gnr_c > avg_c * 2:
        if is_strict:
            dip_amount = ( gnr_c / avg_c ) * dip_score * price
        else:
            dip_amount = 0
    elif gnr_c < avg_c * 0.5:
        if is_strict:
            dip_amount = gnr_c
        else:
            dip_amount = 0
    else:
        if is_strict:
            dip_amount = dip_score * price
        else:
            dip_amount = dip_score * price
    return dip_amount

def analyze_dip_results(
        washed_data: list[dict], icd_name_dict: dict,
        price310: float, price390: float, factor: float,
        engine: Engine
    ) -> list[dict]:
    """Analyse the simulated grouping results and keep the profitable clues.

    For every washed case, compares the original DIP settlement amount with
    each simulated regrouping (alternative main diagnosis and/or added
    operation code).  Only simulations whose score and amount strictly exceed
    the original — with a gain of more than 100 — are kept.  The result is
    then trimmed so the later Excel export stays under roughly 60000 detail
    rows.

    NOTE: annotation fixed from ``-> None``; this function returns the
    analysed clue list.

    Args:
        washed_data (list[dict]): cleaned cases, one dict per settlement
        icd_name_dict (dict): ICD code -> display name lookup (lower-cased
            keys also probed)
        price310 (float): point price for insutype '310'
        price390 (float): point price for insutype '390'
        factor (float): hospital-specific price factor
        engine (Engine): database engine

    Returns:
        list[dict]: analysed clues; each item carries the original grouping
        info plus a ``simulate_groups`` list of improving simulations.
    """
    df_dip_case = _fetch_tmp_dip_case(engine, 'clue863_dip_case')
    washed_data_len = len(washed_data)
    # Analyse each washed case, keeping the analysis results.
    analysed_data = []
    for idx, washed_datum in enumerate(washed_data):
        setl_id_base = washed_datum['setl_id']
        gnr_c = float(washed_datum.get('gnr_c', 0))
        dip_score_origin = washed_datum['dip_score']
        dip_code_origin = washed_datum['dip_code']
        dip_name_origin = washed_datum['dip_name']
        top_oper_code = washed_datum['top_oper_code']
        top_oper_name = washed_datum['top_oper_name']
        # Pull the case-level details from df_dip_case; simulated setl_ids
        # are prefixed with the base setl_id plus an underscore.
        dip_case_data = df_dip_case[df_dip_case['setl_id'].str.startswith(setl_id_base + '_')].to_dict(orient='records')
        # No simulations for this case — skip it.
        if not dip_case_data:
            continue
        insutype = dip_case_data[0]['insutype']
        dxmaincode = dip_case_data[0]['dxmaincode']
        dxmainname = dip_case_data[0]['dxmainname']
        dxcodeall = dip_case_data[0]['dxcodeall']
        dxnameall = dip_case_data[0]['dxnameall']
        txcodeall = dip_case_data[0]['txcodeall']
        txnameall = dip_case_data[0]['txnameall']
        dip_amount_origin = _cal_dip_amount(dip_score_origin, gnr_c, price310, price390, factor, insutype, is_strict=True)
        # If the original amount already covers the cost, nothing to gain — skip.
        if dip_amount_origin >= gnr_c:
            continue
        analysed_datum = {
            'setl_id': setl_id_base,
            'gnr_c': gnr_c,
            'dip_code_origin': dip_code_origin,
            'dip_name_origin': dip_name_origin,
            'dip_score_origin': dip_score_origin,
            'dip_amount_origin': dip_amount_origin,
            'dxmaincode': dxmaincode,
            'dxmainname': dxmainname,
            'dxallcode': dxcodeall,
            'dxallname': dxnameall,
            'txallcode': txcodeall,
            'txallname': txnameall,
            'top_oper_code': top_oper_code,
            'top_oper_name': top_oper_name,
        }
        simulate_groups_dict = {}
        for dip_case_datum in dip_case_data:
            dip_code_simulate = dip_case_datum['dip_code']
            dip_name_simulate = dip_case_datum['dip_name']
            dip_score_simulate = dip_case_datum['dip_score']
            dx_float_code = dip_case_datum['dx_float_code']
            tx_float_code = dip_case_datum['tx_float_code']
            # Simulated score no better than the original — skip.
            if dip_score_simulate <= dip_score_origin:
                continue
            dip_amount_simulate = _cal_dip_amount(dip_score_simulate, gnr_c, price310, price390, factor, insutype)
            # Simulated amount does not exceed the actual cost — skip.
            if dip_amount_simulate <= gnr_c:
                continue
            # Probe the name dict with lower-cased keys first, falling back
            # to the raw code, since the dict's key casing may vary.
            dx_key = dx_float_code.lower() if isinstance(dx_float_code, str) else dx_float_code
            tx_key = tx_float_code.lower() if isinstance(tx_float_code, str) else tx_float_code
            float_pair = {
                'dx_float_code': dx_float_code,
                'dx_float_name': icd_name_dict.get(dx_key, icd_name_dict.get(dx_float_code, '')),
                'tx_float_code': tx_float_code,
                'tx_float_name': icd_name_dict.get(tx_key, icd_name_dict.get(tx_float_code, '')),
            }
            dip_amount_diff = dip_amount_simulate - dip_amount_origin
            # Gain of 100 or less is not worth reporting — skip.
            if dip_amount_diff <= 100:
                continue
            # First occurrence of this simulated dip code — create its entry.
            if dip_code_simulate not in simulate_groups_dict:
                simulate_groups_dict[dip_code_simulate] = {
                    'dip_code_simulate': dip_code_simulate,
                    'dip_name_simulate': dip_name_simulate,
                    'dip_score_simulate': dip_score_simulate,
                    'dx_tx_float_pair': [float_pair],
                    'dip_amount_simulate': dip_amount_simulate,
                    'dip_amount_diff': dip_amount_simulate - dip_amount_origin,
                }
            else:
                # Same dip code seen again: append the (dx, tx) pair unless
                # it is already recorded.
                pairs = simulate_groups_dict[dip_code_simulate]['dx_tx_float_pair']
                if not any(
                    pair.get('dx_float_code') == dx_float_code and pair.get('tx_float_code') == tx_float_code
                    for pair in pairs
                ):
                    pairs.append(float_pair)
        # Flatten simulate_groups_dict into a list.
        simulate_groups = list(simulate_groups_dict.values())
        if simulate_groups:
            analysed_datum['simulate_groups'] = simulate_groups
            analysed_data.append(analysed_datum)
        # Inline progress indicator.
        print(f"\r[{elapsed()}] Analyzed {idx + 1}/{washed_data_len} records.", end='')
    print()

    # Per setl_id, count the (dx, tx) float pairs and track the smallest
    # dip_amount_diff across its simulated groups.
    setl_stats: dict[str, dict[str, object]] = {}
    for analysed_datum in analysed_data:
        setl_id = analysed_datum['setl_id']
        simulate_groups = analysed_datum.get('simulate_groups') or []
        total_pairs = 0
        min_dip_amount_diff = None
        for group in simulate_groups:
            dip_amount_diff = group['dip_amount_diff']
            float_pairs = group.get('dx_tx_float_pair') or []
            total_pairs += len(float_pairs)
            if min_dip_amount_diff is None or dip_amount_diff < min_dip_amount_diff:
                min_dip_amount_diff = dip_amount_diff
        setl_stats[setl_id] = {
            'total_dx_tx_pairs': total_pairs,
            'min_dip_amount_diff': min_dip_amount_diff,
        }
    print(f"[{elapsed()}] Calculated setl_stats for {len(setl_stats)} setl_ids.")
    
    # Trim setl_stats — the Excel output should not carry too many rows.
    # Sort by min_dip_amount_diff descending and keep setl_ids until the
    # cumulative pair count would exceed 60000.
    sorted_setl_stats = sorted(
        setl_stats.items(),
        key=lambda item: (item[1]['min_dip_amount_diff'] if item[1]['min_dip_amount_diff'] is not None else float('inf')),
        reverse=True
    )
    trimmed_setl_ids = set()
    total_pairs_counted = 0
    for setl_id, stats in sorted_setl_stats:
        total_pairs = stats['total_dx_tx_pairs']
        if total_pairs_counted + total_pairs > 60000:
            break
        trimmed_setl_ids.add(setl_id)
        total_pairs_counted += total_pairs
    print(f"[{elapsed()}] Trimmed setl_ids to {len(trimmed_setl_ids)} with total pairs {total_pairs_counted}.")

    analysed_data = [analysed_datum for analysed_datum in analysed_data if analysed_datum['setl_id'] in trimmed_setl_ids]
    print(f"[{elapsed()}] Final analysed data count: {len(analysed_data)}.")

    # Return the analysed clues.
    return analysed_data

# 输出分析结果作为线索
def output_results(
    analysed_data: list[dict],
    hsp_abbr: str
) -> None:
    """Export analysed clue data to an Excel workbook with merged hierarchical rows.

    The sheet has three column levels: level 1 (original case info, merged
    vertically across all rows of one settlement), level 2 (one simulated
    DIP group, merged across its float pairs) and level 3 (one corrected
    diagnosis/operation pair per row).

    Args:
        analysed_data (list[dict]): output of analyze_dip_results
        hsp_abbr (str): hospital abbreviation, used in the output path/name
    """

    file_path_pre = r'STEP8筛选/clue'
    os.makedirs(file_path_pre, exist_ok=True)
    os.makedirs(os.path.join(file_path_pre, hsp_abbr), exist_ok=True)
    file_name = f"clue863_手术缺失或主诊断不对应线索_内部_{hsp_abbr}.xlsx"
    file_path = os.path.join(file_path_pre, hsp_abbr, file_name)

    # Level-1 columns: one value per settlement (field key, Chinese header).
    level1_columns = [
        ('setl_id', '结算ID'),
        ('gnr_c', '总费用'),
        ('dip_code_origin', '原DIP编号'),
        ('dip_name_origin', '原DIP名称'),
        ('dip_score_origin', '原DIP分值'),
        ('dip_amount_origin', '原DIP结算额'),
        ('dxmaincode', '主诊断'),
        ('dxmainname', '主诊断名称'),
        ('dxallcode', '全诊断'),
        ('dxallname', '全诊断名称'),
        ('txallcode', '全手术'),
        ('txallname', '全手术名称'),
        ('top_oper_code', '关键项目编码'),
        ('top_oper_name', '关键项目名称'),
    ]

    # Level-2 columns: one value per simulated DIP group.
    level2_columns = [
        ('dip_code_simulate', '校正DIP编号'),
        ('dip_name_simulate', '校正DIP名称'),
        ('dip_score_simulate', '校正DIP分值'),
        ('dip_amount_simulate', '校正DIP结算额'),
        ('dip_amount_diff', '校正增加额'),
    ]

    # Level-3 columns: one value per corrected diagnosis/operation pair.
    level3_columns = [
        ('dx_float_code', '校正主诊断'),
        ('dx_float_name', '校正主诊断名称'),
        ('tx_float_code', '校正手术'),
        ('tx_float_name', '校正手术名称'),
    ]

    all_columns = level1_columns + level2_columns + level3_columns

    wb = openpyxl.Workbook()
    ws = wb.active
    ws.title = 'clue863'
    # Keep the header row visible while scrolling.
    ws.freeze_panes = 'A2'

    header_alignment = Alignment(horizontal='center', vertical='center')
    vertical_alignment = Alignment(vertical='center')

    # Write the header row.
    for col_idx, (_, header) in enumerate(all_columns, start=1):
        cell = ws.cell(row=1, column=col_idx, value=header)
        cell.alignment = header_alignment

    current_row = 2
    level1_count = len(level1_columns)
    level2_count = len(level2_columns)
    level3_start = level1_count + level2_count + 1

    for analysed in analysed_data:
        simulate_groups = analysed.get('simulate_groups') or []

        # A settlement without simulations gets a single level-1-only row.
        if not simulate_groups:
            row_idx = current_row
            for col_idx, (field, _) in enumerate(level1_columns, start=1):
                cell = ws.cell(row=row_idx, column=col_idx, value=analysed.get(field))
                cell.alignment = vertical_alignment
            current_row += 1
            continue

        # Pair each group with its float pairs; a group with no pairs still
        # occupies one row (represented by a None pair).
        grouped_pairs: list[tuple[dict, list[dict | None]]]
        grouped_pairs = []
        for group in simulate_groups:
            float_pairs = group.get('dx_tx_float_pair') or []
            if not float_pairs:
                float_pairs = [None]
            grouped_pairs.append((group, float_pairs))

        total_rows = sum(len(pairs) for _, pairs in grouped_pairs)
        setl_start = current_row

        for group, pairs in grouped_pairs:
            group_start = current_row
            for pair in pairs:
                row_idx = current_row

                # Level-1 values only on the settlement's first row; the
                # cells below are merged afterwards.
                if row_idx == setl_start:
                    for col_idx, (field, _) in enumerate(level1_columns, start=1):
                        cell = ws.cell(row=row_idx, column=col_idx, value=analysed.get(field))
                        cell.alignment = vertical_alignment

                # Level-2 values only on the group's first row.
                if row_idx == group_start:
                    for offset, (field, _) in enumerate(level2_columns, start=level1_count + 1):
                        cell = ws.cell(row=row_idx, column=offset, value=group.get(field))
                        cell.alignment = vertical_alignment

                # Level-3 values on every row that has a real pair.
                if pair is not None:
                    for offset, (field, _) in enumerate(level3_columns, start=level3_start):
                        ws.cell(row=row_idx, column=offset, value=pair.get(field))

                current_row += 1

            # Merge the level-2 cells vertically across the group's rows.
            if len(pairs) > 1:
                for idx_offset in range(level2_count):
                    col_idx = level1_count + 1 + idx_offset
                    ws.merge_cells(
                        start_row=group_start,
                        start_column=col_idx,
                        end_row=current_row - 1,
                        end_column=col_idx,
                    )
                    ws.cell(row=group_start, column=col_idx).alignment = vertical_alignment

        # Merge the level-1 cells vertically across the settlement's rows.
        if total_rows > 1:
            for idx_offset in range(level1_count):
                col_idx = 1 + idx_offset
                ws.merge_cells(
                    start_row=setl_start,
                    start_column=col_idx,
                    end_row=setl_start + total_rows - 1,
                    end_column=col_idx,
                )
                ws.cell(row=setl_start, column=col_idx).alignment = vertical_alignment

    # Thin border applied across the populated range.
    thin_border = Border(
        left=Side(style='thin'),
        right=Side(style='thin'),
        top=Side(style='thin'),
        bottom=Side(style='thin')
    )

    # Level-1 and level-2 columns: wrap text and add borders.
    for row in ws.iter_rows(min_row=1, max_row=current_row - 1, min_col=1, max_col=level1_count + level2_count):
        for cell in row:
            cell.alignment = Alignment(wrap_text=True, vertical='center')
            cell.border = thin_border
    
    # Level-3 columns: borders only.
    for row in ws.iter_rows(min_row=1, max_row=current_row - 1, min_col=level3_start, max_col=len(all_columns)):
        for cell in row:
            cell.border = thin_border
    
    # Widen the all-diagnosis-name and all-operation-name columns.
    dxallname_col_idx = next(idx + 1 for idx, (field, _) in enumerate(all_columns) if field == 'dxallname')
    txallname_col_idx = next(idx + 1 for idx, (field, _) in enumerate(all_columns) if field == 'txallname')
    ws.column_dimensions[openpyxl.utils.get_column_letter(dxallname_col_idx)].width = 40
    ws.column_dimensions[openpyxl.utils.get_column_letter(txallname_col_idx)].width = 40

    # Format the all-diagnosis, all-operation and key-item code columns as
    # text (so Excel does not mangle code strings) and widen them.
    dxallcode_col_idx = next(idx + 1 for idx, (field, _) in enumerate(all_columns) if field == 'dxallcode')
    txallcode_col_idx = next(idx + 1 for idx, (field, _) in enumerate(all_columns) if field == 'txallcode')
    top_oper_code_col_idx = next(idx + 1 for idx, (field, _) in enumerate(all_columns) if field == 'top_oper_code')
    for row in ws.iter_rows(min_row=2, max_row=current_row - 1, min_col=dxallcode_col_idx, max_col=dxallcode_col_idx):
        for cell in row:
            cell.number_format = '@'
    for row in ws.iter_rows(min_row=2, max_row=current_row - 1, min_col=txallcode_col_idx, max_col=txallcode_col_idx):
        for cell in row:
            cell.number_format = '@'
    for row in ws.iter_rows(min_row=2, max_row=current_row - 1, min_col=top_oper_code_col_idx, max_col=top_oper_code_col_idx):
        for cell in row:
            cell.number_format = '@'
    ws.column_dimensions[openpyxl.utils.get_column_letter(dxallcode_col_idx)].width = 40
    ws.column_dimensions[openpyxl.utils.get_column_letter(txallcode_col_idx)].width = 40
    ws.column_dimensions[openpyxl.utils.get_column_letter(top_oper_code_col_idx)].width = 40

    # Widen the corrected-DIP-name column.
    dip_name_simulate_col_idx = next(idx + 1 for idx, (field, _) in enumerate(all_columns) if field == 'dip_name_simulate')
    ws.column_dimensions[openpyxl.utils.get_column_letter(dip_name_simulate_col_idx)].width = 40

    wb.save(file_path)
    print(f"[{elapsed()}] Exported clue results to {file_path}")

# 主函数
# Entry point: run the full STEP8 clue pipeline for every hospital.
if __name__ == "__main__":
    # Pricing constants (PRICE310/PRICE390/FACTOR are defined earlier in this file).
    price310 = PRICE310
    price390 = PRICE390
    factor = FACTOR
    # Optional per-hospital factor override; empty means use the default factor.
    hspFactor_dict = {}

    # Hospitals to process.
    hsp_list = get_hsp_list()

    # Database engine shared by every step below.
    engine = create_db_engine()

    # Load the hospital-independent libraries once.
    clue_dict = load_clue_library()
    df_dip_library = load_dip_library()
    icd_name_dict = load_icd_name(engine, df_dip_library)
    
    for hsp_datum in hsp_list:
        hsp_abbr = hsp_datum['hsp_abbr']
        print(f"[{elapsed()}] Processing hospital: {hsp_abbr}")

        # Query the database for the raw inputs.
        data_an0 = fetch_an_data(engine, hsp_abbr)
        print(f"[{elapsed()}] Fetched {len(data_an0)} a(n) cases for hospital: {hsp_abbr}")

        dx_info_dict = fetch_dx_dict(engine, hsp_abbr)
        print(f"[{elapsed()}] Fetched {len(dx_info_dict)} dx cases for hospital: {hsp_abbr}")

        noninvasive_vent_set = fetch_noninvasive_vent_set(engine, hsp_abbr)
        print(f"[{elapsed()}] Fetched {len(noninvasive_vent_set)} non-invasive vent cases for hospital: {hsp_abbr}")

        invasive_vent_dict = fetch_invasive_vent_dict(engine, hsp_abbr)
        print(f"[{elapsed()}] Fetched {len(invasive_vent_dict)} invasive vent cases for hospital: {hsp_abbr}")

        # Clean the data to be grouped and upload it.
        washed_data = wash_data(
            data_an0,
            dx_info_dict,
            noninvasive_vent_set, invasive_vent_dict
        )
        print(f"[{elapsed()}] Washed data for hospital: {hsp_abbr}")
        
        washed_data = wash_data_float(washed_data, clue_dict)
        print(f"[{elapsed()}] Washed float data for hospital: {hsp_abbr}")

        upload_washed_data(engine, hsp_abbr, washed_data)
        print(f"[{elapsed()}] Uploaded washed data for hospital: {hsp_abbr}")

        # Grouping preparation.
        prepare_dx_pattern_cases(engine, df_dip_library, 'clue863_dx_pattern_cases')
        print(f"[{elapsed()}] Prepared dx pattern cases for hospital: {hsp_abbr}")

        prepare_tx_pattern_cases(engine, df_dip_library, 'clue863_tx_pattern_cases')
        print(f"[{elapsed()}] Prepared tx pattern cases for hospital: {hsp_abbr}")

        # Create the temporary dip table.
        create_tmp_dip_table(engine, 'clue863_dip')
        print(f"[{elapsed()}] Created temporary dip table for hospital: {hsp_abbr}")

        # Run the dip grouping.
        dip_all(
            engine,
            df_dip_library,
            'clue863_dip',
            'clue863_dx_pattern_cases',
            'clue863_tx_pattern_cases'
        )
        # Rank the candidate groups.
        add_priority_column(engine, 'clue863_dip')
        # Index the dip table for the analysis queries.
        create_indexes(engine, 'clue863_dip')
        # Keep only the best group per simulated settlement.
        create_tmp_dip_case_table(engine, 'clue863_dip_case', 'clue863_dip')

        # Analyse the grouping results and export the clues.
        analysed_data = analyze_dip_results(
            washed_data,
            icd_name_dict,
            price310,
            price390,
            hspFactor_dict.get(hsp_abbr, factor),
            engine
        )
        output_results(analysed_data, hsp_abbr)

        print(f"[{elapsed()}] Completed processing hospital: {hsp_abbr}")

    # Fix: this summary line used to sit at module level (column 0), so it
    # executed — and called elapsed() — on import; it belongs in the guard.
    print(f"[{elapsed()}] All hospitals processed.")