"""step5_7_create_dip.py

目的：
    创建 dip 表，对 m 表中普通住院的病例进行分组
    （仅当自查自纠且有手术TX表数据时才使用，飞检时不使用）

步骤概要：
    1. 读取 dipLibrary.xlsx
        1.1 读取 sheet 'dip_library'，获取 dip 各组的入组标准与信息
        1.2 读取 sheet 'tx_type'，获取手术编码分类
    2. 构建 dx_ptn_cases / tx_ptn_cases 临时表，缓存各 pattern 下的病例 setl_id
    3. 新建 dip 表（如果有就先删除）
    4. 遍历 dipLibrary 中的各组，基于临时表插入符合条件的病例
    5. 增加优先级列
    6. 创建索引
    7. 创建 dip_case 表，存储每个病例的最终分组结果
    8. 更新 dip_case 表，查询 d 表，将每一个 setl_id 的单价(p)最高的 item_code, item_name, p，更新到 dip_case 表中
    9. 创建索引
    10. 导出分析表

运行：
   python STEP5干净表/step5_7_create_dip.py
"""
from __future__ import annotations
import os
import sys
from pathlib import Path
from typing import Callable
import argparse
import pandas as pd
from sqlalchemy import text
from sqlalchemy.engine import Engine
import time

ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if ROOT_DIR not in sys.path:
    sys.path.append(ROOT_DIR)

from config import create_db_engine  # noqa: E402


# Timing helpers: t0 is captured at import time and used as the reference
# point for all elapsed-time log prefixes.
t0 = time.time()
def elapsed() -> str:
    """Return the current wall-clock time plus the time elapsed since start.

    Format: "HH:MM:SS (+ <elapsed> sec)" where the elapsed suffix grows to
    include minutes, hours, and days as needed. Used as a log prefix in
    place of the old [INFO]/[START]/[DONE] level tags.
    """
    now = time.strftime("%H:%M:%S", time.localtime())

    # Break the elapsed seconds down once; leading zero components are
    # simply omitted from the rendered suffix.
    total = int(time.time() - t0)
    d, rem = divmod(total, 86400)
    h, rem = divmod(rem, 3600)
    m, s = divmod(rem, 60)

    if d:
        return f"{now} (+ {d} day {h} hour {m} min {s} sec)"
    if h:
        return f"{now} (+ {h} hour {m} min {s} sec)"
    if m:
        return f"{now} (+ {m} min {s} sec)"
    return f"{now} (+ {s} sec)"


def normalize_pattern(value: object) -> str:
    """Normalize pattern strings for consistent matching logic.

    None, NaN, empty, and the literal string 'nan' all collapse to the
    catch-all pattern 'a(n)'; otherwise whitespace is stripped from the
    ends and all internal spaces are removed.
    """
    if value is None or (isinstance(value, float) and pd.isna(value)):
        return 'a(n)'
    cleaned = str(value).strip()
    if not cleaned or cleaned.lower() == 'nan':
        return 'a(n)'
    return cleaned.replace(' ', '')


def escape_sql_literal(value: str) -> str:
    """Escape single quotes in SQL literals.

    Non-string values are returned unchanged so callers can pass through
    numeric cells read from Excel without converting them first.
    """
    # isinstance (not type(...) ==) is the idiomatic check and also accepts
    # str subclasses.
    if isinstance(value, str):
        return value.replace("'", "''")
    return value


def split_pattern(pattern: str, delimiter: str) -> list[str]:
    """Split *pattern* on *delimiter*, trimming whitespace and dropping empties."""
    stripped = (piece.strip() for piece in pattern.split(delimiter))
    return [piece for piece in stripped if piece]


def to_sql_string(value: object) -> str:
    """Convert value to SQL-safe string literal content.

    None and NaN become the empty string; anything else is stringified with
    single quotes doubled for safe embedding in a SQL literal.
    """
    if value is None or (isinstance(value, float) and pd.isna(value)):
        return ''
    return str(value).replace("'", "''")


def build_dx_pattern_select(pattern: str) -> str | None:
    """Generate SQL to select setl_ids that satisfy a dx pattern.

    Returns None for the catch-all pattern 'a(n)' (no dx restriction).
    '/'-separated tokens mean "any of"; '+'-separated tokens mean "all of".
    Tokens with length >= 6 are matched exactly; shorter ones use a prefix
    LIKE.
    """

    def _escape(token: str) -> str:
        # Double single quotes for safe embedding in a SQL literal.
        return token.replace("'", "''")

    def _tokens(raw: str, sep: str) -> list[str]:
        return [piece.strip() for piece in raw.split(sep) if piece.strip()]

    def _match(token: str) -> str:
        # Full diagnosis codes (>= 6 chars) match exactly, prefixes via LIKE.
        if len(token) >= 6:
            return f"dx.dx_code = '{_escape(token)}'"
        return f"dx.dx_code LIKE '{_escape(token)}%'"

    if pattern == 'a(n)':
        return None

    if '/' in pattern:
        tokens = _tokens(pattern, '/')
        if not tokens:
            return None
        any_of = " OR ".join(_match(token) for token in tokens)
        return (
            "SELECT DISTINCT dx.setl_id\n"
            "FROM dx\n"
            "WHERE dx.is_main = 1 AND (" + any_of + ")"
        )

    if '+' in pattern:
        tokens = _tokens(pattern, '+')
        if not tokens:
            return None
        any_of = " OR ".join(_match(token) for token in tokens)
        # NOTE(review): the HAVING side uses exact equality for every token,
        # while the tx builder uses a prefix LIKE in the same position —
        # confirm this asymmetry is intentional.
        all_of = " AND ".join(
            f"SUM(CASE WHEN dx.dx_code = '{_escape(token)}' THEN 1 ELSE 0 END) > 0"
            for token in tokens
        )
        return (
            "SELECT dx.setl_id\n"
            "FROM dx\n"
            "WHERE dx.is_main = 1 AND (" + any_of + ")\n"
            "GROUP BY dx.setl_id\n"
            "HAVING " + all_of
        )

    return (
        "SELECT DISTINCT dx.setl_id\n"
        "FROM dx\n"
        f"WHERE dx.is_main = 1 AND dx.dx_code LIKE '{_escape(pattern)}%'"
    )


def build_tx_pattern_select(pattern: str) -> str | None:
    """Generate SQL to select setl_ids that satisfy a tx pattern.

    Returns None for the catch-all pattern 'a(n)'. Named surgery-class
    patterns are resolved through the dip_tx_class mapping table.
    '/'-separated tokens mean "any of"; '+'-separated tokens mean "all of".
    Tokens with length >= 5 are matched exactly in the WHERE clause; shorter
    ones use a prefix LIKE.
    """

    def _escape(token: str) -> str:
        # Double single quotes for safe embedding in a SQL literal.
        return token.replace("'", "''")

    def _tokens(raw: str, sep: str) -> list[str]:
        return [piece.strip() for piece in raw.split(sep) if piece.strip()]

    def _match(token: str) -> str:
        # Full operation codes (>= 5 chars) match exactly, prefixes via LIKE.
        if len(token) >= 5:
            return f"tx.tx_code = '{_escape(token)}'"
        return f"tx.tx_code LIKE '{_escape(token)}%'"

    if pattern == 'a(n)':
        return None

    class_by_pattern = {
        '一类手术操作': '一类',
        '二类手术操作': '二类',
        '三类手术操作': '三类',
    }
    if pattern in class_by_pattern:
        tx_class = _escape(class_by_pattern[pattern])
        return (
            "SELECT DISTINCT tx.setl_id\n"
            "FROM tx tx\n"
            "JOIN dip_tx_class dtx ON tx.tx_code = dtx.tx_code\n"
            f"WHERE dtx.tx_class = '{tx_class}'"
        )

    if '/' in pattern:
        tokens = _tokens(pattern, '/')
        if not tokens:
            return None
        any_of = " OR ".join(_match(token) for token in tokens)
        return (
            "SELECT DISTINCT tx.setl_id\n"
            "FROM tx tx\n"
            "WHERE (" + any_of + ")"
        )

    if '+' in pattern:
        tokens = _tokens(pattern, '+')
        if not tokens:
            return None
        any_of = " OR ".join(_match(token) for token in tokens)
        # NOTE(review): the HAVING side always uses a prefix LIKE, while the
        # dx builder uses exact equality in the same position — confirm the
        # asymmetry is intentional.
        all_of = " AND ".join(
            f"SUM(CASE WHEN tx.tx_code LIKE '{_escape(token)}%' THEN 1 ELSE 0 END) > 0"
            for token in tokens
        )
        return (
            "SELECT tx.setl_id\n"
            "FROM tx tx\n"
            "WHERE (" + any_of + ")\n"
            "GROUP BY tx.setl_id\n"
            "HAVING " + all_of
        )

    return (
        "SELECT DISTINCT tx.setl_id\n"
        "FROM tx tx\n"
        f"WHERE tx.tx_code LIKE '{_escape(pattern)}%'"
    )


def prepare_pattern_cases_table(
    engine: Engine,
    patterns: list[str],
    table_name: str,
    select_builder: Callable[[str], str | None],
) -> None:
    """Populate a pattern-case mapping table for dx or tx patterns.

    Drops and recreates ``table_name`` with columns (pattern, setl_id),
    then for each pattern runs the SELECT produced by ``select_builder``
    and inserts the matching setl_ids tagged with that pattern. Finally
    builds two lookup indexes.

    Args:
        engine: database engine (Oracle dialect — see the EXECUTE IMMEDIATE
            drop below).
        patterns: normalized pattern strings; empty and 'a(n)' entries are
            skipped.
        table_name: name of the staging table to (re)create.
        select_builder: maps one pattern to a SELECT returning setl_id rows,
            or None when the pattern imposes no filter.
    """
    # The catch-all 'a(n)' matches everything, so nothing is staged for it.
    patterns = [pattern for pattern in patterns if pattern and pattern != 'a(n)']
    if not patterns:
        return

    # Oracle has no DROP TABLE IF EXISTS; swallow the error via a PL/SQL block.
    drop_sql = f"BEGIN EXECUTE IMMEDIATE 'DROP TABLE {table_name}'; EXCEPTION WHEN OTHERS THEN NULL; END;"
    create_sql = (
        f"CREATE TABLE {table_name} (\n"
        "    pattern VARCHAR(200),\n"
        "    setl_id VARCHAR(200)\n"
        ")"
    )

    with engine.connect() as conn:
        conn.execute(text(drop_sql))
        conn.execute(text(create_sql))
        conn.commit()

    total_rows = 0
    total_patterns = len(patterns)
    for idx, pattern in enumerate(patterns, start=1):
        select_sql = select_builder(pattern)
        if not select_sql:
            continue
        # The pattern label is bound as a parameter; the inner SELECT was
        # already built with escaped literals by select_builder.
        insert_sql = (
            f"INSERT INTO {table_name} (pattern, setl_id)\n"
            "SELECT :pattern AS pattern, setl_id\n"
            f"FROM (\n{select_sql}\n)"
        )
        with engine.begin() as conn:
            result = conn.execute(text(insert_sql), {"pattern": pattern})
            # rowcount may be -1/None on some drivers; treat that as 0.
            inserted = result.rowcount if result.rowcount and result.rowcount > 0 else 0
            total_rows += inserted
        # Progress line is rewritten in place via '\r'.
        print(
            f"\r[{elapsed()}] Prepared pattern cases {idx}/{total_patterns}, "
            f"{total_rows} rows, pattern {pattern}.",
            end=''
        )
    print()

    index_prefix = f"IDX_{table_name.upper()}"
    with engine.connect() as conn:
        conn.execute(text(f"CREATE INDEX {index_prefix}_PATTERN ON {table_name}(pattern, setl_id)"))
        conn.execute(text(f"CREATE INDEX {index_prefix}_SETL ON {table_name}(setl_id)"))
        conn.commit()
        print(f"[{elapsed()}] Created indexes on {table_name}.")


def prepare_dx_pattern_cases(engine: Engine, dip_library_df: pd.DataFrame, table_name: str) -> None:
    """Stage matching setl_ids for every distinct dx pattern in the library."""
    distinct_patterns = {normalize_pattern(value) for value in dip_library_df['dx_pattern'].tolist()}
    prepare_pattern_cases_table(engine, sorted(distinct_patterns), table_name, build_dx_pattern_select)


def prepare_tx_pattern_cases(engine: Engine, dip_library_df: pd.DataFrame, table_name: str) -> None:
    """Stage matching setl_ids for every distinct tx pattern in the library."""
    distinct_patterns = {normalize_pattern(value) for value in dip_library_df['tx_pattern'].tolist()}
    prepare_pattern_cases_table(engine, sorted(distinct_patterns), table_name, build_tx_pattern_select)

# ------------------------------
#  读取分值库
# ------------------------------
def read_dip_library(file_path: str | Path) -> tuple[pd.DataFrame, pd.DataFrame]:
    """Read the DIP library workbook.

    Reads sheet 'dip_library' (grouping criteria and info for each DIP
    group) and sheet 'tx_type' (surgery code classification).

    Args:
        file_path: path to the workbook (main() passes ROOT_DIR/env.xlsx).

    Returns:
        pd.DataFrame: DIP grouping criteria and group info.
        pd.DataFrame: surgery code classification.
    """
    dip_library_df = pd.read_excel(file_path, sheet_name='dip_library')
    tx_type_df = pd.read_excel(file_path, sheet_name='tx_type')
    print(f"Read dip library from {file_path}")
    return dip_library_df, tx_type_df

def upload_tx_class(engine: Engine, tx_type_df: pd.DataFrame) -> None:
    """Upload the surgery-code classification to the database.

    Recreates the dip_tx_class table, bulk-inserts *tx_type_df*, and adds
    lookup indexes on tx_code and tx_class.

    Args:
        engine (Engine): database engine (Oracle dialect).
        tx_type_df (pd.DataFrame): surgery code classification; columns must
            match tx_code / tx_name / tx_class below.
    """
    with engine.connect() as conn:
        # Oracle has no DROP TABLE IF EXISTS; swallow the error via PL/SQL.
        conn.execute(text("BEGIN EXECUTE IMMEDIATE 'DROP TABLE dip_tx_class'; EXCEPTION WHEN OTHERS THEN NULL; END;"))
        # Create the new table.
        create_table_sql = """
            CREATE TABLE dip_tx_class (
                tx_code VARCHAR(200),
                tx_name VARCHAR(1000),
                tx_class VARCHAR(200)
            )
        """
        conn.execute(text(create_table_sql))
        # Bulk-insert the classification rows on the same connection.
        tx_type_df.to_sql('dip_tx_class', con=conn, if_exists='append', index=False)
        conn.commit()
        # Create indexes (DDL auto-commits in Oracle).
        conn.execute(text("CREATE INDEX idx_tx_code ON dip_tx_class(tx_code)"))
        conn.execute(text("CREATE INDEX idx_tx_class ON dip_tx_class(tx_class)"))
        print(f"[{elapsed()}]Uploaded dip_tx_class table.")

# ------------------------------
#  创建 dip 表
# ------------------------------
def create_dip_table(engine: Engine, table_name: str) -> None:
    """Create the dip table, dropping any existing one first.

    The table holds one row per (setl_id, candidate DIP group);
    dip_priority is left NULL here and filled by add_priority_column().

    Args:
        engine (Engine): database engine (Oracle dialect).
        table_name (str): dip table name.
    """
    with engine.connect() as conn:
        # Oracle has no DROP TABLE IF EXISTS; swallow the error via PL/SQL.
        conn.execute(text(f"BEGIN EXECUTE IMMEDIATE 'DROP TABLE {table_name}'; EXCEPTION WHEN OTHERS THEN NULL; END;"))
        # Create the new table.
        create_table_sql = f"""
            CREATE TABLE {table_name} (
                hsp_abbr VARCHAR(200),
                setl_id VARCHAR(200),
                dip_code VARCHAR(200),
                dip_name VARCHAR(1000),
                dx_pattern VARCHAR(200),
                dx_pattern_name VARCHAR(1000),
                tx_pattern VARCHAR(200),
                tx_pattern_name VARCHAR(1000),
                dip_group_type VARCHAR(200),
                dip_score NUMBER,
                dip_dx_type VARCHAR(200),
                dip_tx_type VARCHAR(200),
                dip_rank_type NUMBER,
                dip_priority NUMBER
            )
        """
        conn.execute(text(create_table_sql))
        conn.commit()
        print(f"[{elapsed()}]Table {table_name} created.")


# ------------------------------
#  分组
# ------------------------------
def _dip_single(
    engine: Engine,
    datum: pd.Series,
    table_name: str,
    dx_cases_table: str,
    tx_cases_table: str
) -> tuple[int, str]:
    """Insert the cases that match one DIP group definition into the dip table.

    Joins m against the pre-staged dx/tx pattern-case tables (skipping a join
    when the corresponding pattern is the catch-all 'a(n)') and inserts one
    row per matching case.

    Args:
        engine: database engine.
        datum: one dip library row (group definition).
        table_name: target dip table name.
        dx_cases_table: staging table of setl_ids per dx pattern.
        tx_cases_table: staging table of setl_ids per tx pattern.

    Returns:
        tuple[int, str]: (rows inserted, dip code for progress display).
    """

    dip_code_display = '' if pd.isna(datum['dip_code']) else str(datum['dip_code'])
    dip_code = to_sql_string(datum['dip_code'])
    dip_name = to_sql_string(datum['dip_name'])

    # Missing/blank dx pattern means "any diagnosis" ('a(n)').
    raw_dx_pattern = datum['dx_pattern']
    if raw_dx_pattern is None or (isinstance(raw_dx_pattern, float) and pd.isna(raw_dx_pattern)):
        raw_dx_pattern = 'a(n)'
    else:
        raw_dx_pattern = str(raw_dx_pattern).strip() or 'a(n)'
    dx_pattern_key = normalize_pattern(raw_dx_pattern)
    dx_pattern_display = to_sql_string(raw_dx_pattern)
    dx_pattern_name = to_sql_string(datum['dx_pattern_name'])

    # Missing/blank tx pattern means "any operation" ('a(n)').
    raw_tx_pattern = datum['tx_pattern']
    if raw_tx_pattern is None or (isinstance(raw_tx_pattern, float) and pd.isna(raw_tx_pattern)):
        raw_tx_pattern = 'a(n)'
    else:
        raw_tx_pattern = str(raw_tx_pattern).strip() or 'a(n)'
    tx_pattern_key = normalize_pattern(raw_tx_pattern)
    tx_pattern_display = to_sql_string(raw_tx_pattern)
    tx_pattern_name = to_sql_string(datum['tx_pattern_name'])

    dip_group_type = to_sql_string(datum['dip_group_type'])
    dip_dx_type = to_sql_string(datum['dip_dx_type'])
    dip_tx_type = to_sql_string(datum['dip_tx_type'])

    # Numeric columns are rendered as bare SQL (or NULL) rather than quoted.
    dip_score = datum['dip_score']
    dip_rank_type = datum['dip_rank_type']
    dip_score_sql = 'NULL' if pd.isna(dip_score) else str(dip_score)
    dip_rank_type_sql = 'NULL' if pd.isna(dip_rank_type) else str(dip_rank_type)

    # 'a(n)' imposes no restriction on that axis, so no join is required.
    join_clauses: list[str] = []
    if dx_pattern_key != 'a(n)':
        join_clauses.append(
            f"JOIN {dx_cases_table} dxp ON dxp.setl_id = m.setl_id AND dxp.pattern = '{escape_sql_literal(dx_pattern_key)}'"
        )
    if tx_pattern_key != 'a(n)':
        join_clauses.append(
            f"JOIN {tx_cases_table} txp ON txp.setl_id = m.setl_id AND txp.pattern = '{escape_sql_literal(tx_pattern_key)}'"
        )
    joins_sql = ("\n" + "\n".join(join_clauses)) if join_clauses else ''

    # med_type filter restricts to ordinary inpatient cases — presumably
    # code '21' is the numeric equivalent of '普通住院'; confirm with the
    # m-table code list.
    sql = f"""
        INSERT INTO {table_name} (
            hsp_abbr, setl_id, dip_code, dip_name, dx_pattern, dx_pattern_name, tx_pattern, tx_pattern_name,
            dip_group_type, dip_score, dip_dx_type, dip_tx_type, dip_rank_type
        )
        SELECT
            m.hsp_abbr, m.setl_id,
            '{dip_code}', '{dip_name}', '{dx_pattern_display}', '{dx_pattern_name}', '{tx_pattern_display}', '{tx_pattern_name}',
            '{dip_group_type}', {dip_score_sql}, '{dip_dx_type}', '{dip_tx_type}', {dip_rank_type_sql}
        FROM
            m{joins_sql}
        WHERE
            m.med_type in ('普通住院','21')
    """

    with engine.begin() as conn:
        result = conn.execute(text(sql))
        # rowcount may be -1/None on some drivers; treat that as 0.
        rowcount = result.rowcount if result.rowcount and result.rowcount > 0 else 0

    return rowcount, dip_code_display

def dip_all(
    engine: Engine,
    dip_library_df0: pd.DataFrame,
    table_name: str,
    dx_cases_table: str,
    tx_cases_table: str,
) -> None:
    """Run DIP grouping for every library row whose patterns have staged cases.

    Args:
        engine (Engine): database engine.
        dip_library_df0 (pd.DataFrame): dip library rows (group definitions).
        table_name (str): target dip table name.
        dx_cases_table (str): staging table of setl_ids per dx pattern.
        tx_cases_table (str): staging table of setl_ids per tx pattern.
    """
    dip_library_df = dip_library_df0.copy()
    # Keep only groups whose dx OR tx pattern produced staged cases; rows
    # matching neither table could never insert anything.
    sql_dx_pattern = f"SELECT DISTINCT pattern FROM {dx_cases_table}"
    sql_tx_pattern = f"SELECT DISTINCT pattern FROM {tx_cases_table}"
    with engine.connect() as conn:
        existing_dx_patterns = {row[0] for row in conn.execute(text(sql_dx_pattern)).fetchall()}
        existing_tx_patterns = {row[0] for row in conn.execute(text(sql_tx_pattern)).fetchall()}
    dip_library_df = dip_library_df[
        dip_library_df['dx_pattern'].apply(lambda v: normalize_pattern(v) in existing_dx_patterns) |
        dip_library_df['tx_pattern'].apply(lambda v: normalize_pattern(v) in existing_tx_patterns)
    ].reset_index(drop=True)

    total_jobs = len(dip_library_df)
    total_rows = 0
    for idx, row in dip_library_df.iterrows():
        rows, dip_code = _dip_single(engine, row, table_name, dx_cases_table, tx_cases_table)
        total_rows += rows
        # In-place progress line; trailing spaces clear leftovers from longer
        # previous lines. NOTE(review): no final newline is printed here — the
        # caller's next print continues on the same line.
        print(f"\r[{elapsed()}] {idx + 1}/{total_jobs}, {total_rows} rows, {dip_code}.                                 ", end='')

# ------------------------------
# 创建优先级列
# ------------------------------
def add_priority_column(engine: Engine, table_name: str) -> None:
    """Fill the dip_priority column of the dip table.

    Priority is the ROW_NUMBER per setl_id ordered by dip_rank_type DESC then
    dip_score DESC; priority 1 is later selected as the case's final group
    (see create_dip_case_table).

    Args:
        engine (Engine): database engine.
        table_name (str): dip table name.
    """
    with engine.connect() as conn:
        # Rank each case's candidate groups and write the rank back via MERGE,
        # matching on (setl_id, dip_code).
        update_sql = f"""
            MERGE INTO {table_name} target
            USING (
                SELECT setl_id, dip_code, 
                       ROW_NUMBER() OVER (PARTITION BY setl_id ORDER BY dip_rank_type DESC, dip_score DESC) AS priority
                FROM {table_name}
            ) src
            ON (target.setl_id = src.setl_id AND target.dip_code = src.dip_code)
            WHEN MATCHED THEN
            UPDATE SET target.dip_priority = src.priority
        """
        conn.execute(text(update_sql))
        conn.commit()
        print(f"[{elapsed()}] Added priority column to {table_name}.")

# ------------------------------
# 创建索引
# ------------------------------
def create_indexes(engine: Engine, table_name: str) -> None:
    """Create lookup indexes on the dip table.

    Args:
        engine (Engine): database engine.
        table_name (str): dip table name.
    """
    with engine.connect() as conn:
        # setl_id index left disabled — presumably not needed by current
        # queries; confirm before re-enabling.
        # conn.execute(text(f"CREATE INDEX idx_dip_setl_id ON {table_name}(setl_id)"))
        conn.execute(text(f"CREATE INDEX idx_dip_dip_code ON {table_name}(dip_code)"))
        conn.execute(text(f"CREATE INDEX idx_dip_dip_rank_type ON {table_name}(dip_rank_type)"))
        conn.execute(text(f"CREATE INDEX idx_dip_dip_score ON {table_name}(dip_score)"))
        conn.commit()
        print(f"[{elapsed()}] Created indexes on {table_name}.")

# ------------------------------
#  创建 dip_case 表
# ------------------------------
def create_dip_case_table(engine: Engine, table_name: str, table_source_name: str) -> None:
    """Create the dip_case table holding each case's final (priority-1) group.

    Args:
        engine (Engine): database engine (Oracle dialect).
        table_name (str): dip_case table name.
        table_source_name (str): source dip table name (must already have
            dip_priority populated).
    """
    with engine.connect() as conn:
        # Oracle has no DROP TABLE IF EXISTS; swallow the error via PL/SQL.
        conn.execute(text(f"BEGIN EXECUTE IMMEDIATE 'DROP TABLE {table_name}'; EXCEPTION WHEN OTHERS THEN NULL; END;"))
        # Create the new table; top_oper_*, p and is_rescue are filled later
        # by update_dip_case_table.
        create_table_sql = f"""
            CREATE TABLE {table_name} (
                hsp_abbr VARCHAR2(200),
                setl_id VARCHAR2(200) PRIMARY KEY,
                dip_code VARCHAR2(200),
                dip_name VARCHAR2(1000),
                dx_pattern VARCHAR2(200),
                dx_pattern_name VARCHAR2(1000),
                tx_pattern VARCHAR2(200),
                tx_pattern_name VARCHAR2(1000),
                dip_group_type VARCHAR2(200),
                dip_dx_type VARCHAR2(200),
                dip_tx_type VARCHAR2(200),
                dip_rank_type NUMBER,
                dip_score NUMBER,
                top_oper_code VARCHAR2(200),
                top_oper_name VARCHAR2(1000),
                p NUMBER,
                is_rescue VARCHAR2(1)
            )
        """
        conn.execute(text(create_table_sql))
        conn.commit()
        # Copy over each case's priority-1 (winning) group row.
        insert_sql = f"""
            INSERT INTO {table_name} (
                hsp_abbr, setl_id, dip_code, dip_name, 
                dx_pattern, dx_pattern_name, tx_pattern, tx_pattern_name,
                dip_group_type, dip_dx_type, dip_tx_type, dip_rank_type,
                dip_score
            )
            SELECT hsp_abbr, setl_id, dip_code, dip_name, dx_pattern, dx_pattern_name, tx_pattern, tx_pattern_name,
                   dip_group_type, dip_dx_type, dip_tx_type, dip_rank_type, dip_score
            FROM {table_source_name} dip
            WHERE dip_priority = 1
        """
        conn.execute(text(insert_sql))
        conn.commit()
        print(f"[{elapsed()}]Table {table_name} created.")

# ------------------------------
#  更新 dip_case 表
# ------------------------------
def update_dip_case_table(engine: Engine, dip_table: str, dip_case_table: str) -> None:
    """Enrich dip_case with the highest-priced operation item and a rescue flag.

    Steps:
        1. MERGE from the d table: for each setl_id, pick the 治疗费/手术费
           item with the highest unit price (p) and store its code/name/price
           as top_oper_code / top_oper_name / p.
        2. Set is_rescue = 1 for cases containing any rescue-related item
           (resolved through scene_item_ext), 0 otherwise.
        3. Create secondary indexes, skipping any that already exist.

    Args:
        engine (Engine): database engine (Oracle dialect).
        dip_table (str): dip table name (currently unused; kept for interface
            stability).
        dip_case_table (str): dip_case table name.
    """

    # --- 1. top_oper_code / top_oper_name / p ---
    with engine.connect() as conn:
        update_sql = f"""
            MERGE INTO {dip_case_table} target
            USING (
                SELECT setl_id, item_code, item_name, p
                FROM (
                    SELECT
                        d.setl_id,
                        d.item_std_code as item_code,
                        d.item_std_name as item_name,
                        d.p,
                        ROW_NUMBER() OVER (
                            PARTITION BY d.setl_id
                            ORDER BY d.p DESC NULLS LAST, d.item_std_code
                        ) AS rn
                    FROM d d
                    WHERE d.item_type IN ('治疗费','手术费')
                )
                WHERE rn = 1
            ) src
            ON (target.setl_id = src.setl_id)
            WHEN MATCHED THEN
            UPDATE SET target.top_oper_code = src.item_code,
                       target.top_oper_name = src.item_name,
                       target.p = src.p
        """
        conn.execute(text(update_sql))
        conn.commit()
        print(f"[{elapsed()}]Updated {dip_case_table} table with top oper.")

    # --- 2. is_rescue ---
    # Item codes/names (trusted constants) that indicate a rescue case.
    rescue_item_ptns = (
        '001103000010000-110300001', '001104000010000-110400001', '001109000030000-110900003', '001201000010000-120100001', 
        '001201000020000-120100002', '001201000060000-120100006', '001201000100000-120100010', '001202000010000-120200001', 
        '120200002', '120200003', '001204000030000-120400003', '003106030010000-310603001', '003106030020000-310603002', 
        '003106070030000-310607003', '003106070040000-310607004', '003106070050000-310607005', '003107010220000-310701022', 
        '003107020020000-310702002', '003107020090000-310702009', '003107020160000-310702016', '003107020170000-310702017', 
        '003107020180000-310702018', '003107020220000-310702022', '003112020030000-311202003', '003112020040000-311202004', 
        '003112020070000-311202007', '003112020120000-311202012', '003114000400000-311400040', '003114000410000-311400041', 
        '003114000420000-311400042', '003114000430000-311400043', '003115030040000-311503004', '003115030050000-311503005', 
        '003115030070000-311503007', '003301000120000-330100012', '003301000130000-330100013', '003302010180000-330201018', 
        '003307010030000-330701003', '003307010040000-330701004', '003307010050000-330701005', '001103000010100-110300001-1', 
        '003107020180100-310702018-1', '003112020070100-311202007-1', '003112020070200-311202007-2', '003112020070300-311202007-3', 
        '003112020070000-311202007-4', 
        '急诊监护费', '院前急救费', '监护病房床位费', '重症监护', '特级护理', '特殊疾病护理', '气管切开护理', '大抢救', '中抢救', '小抢救', 
        '心内注射', '呼吸机辅助呼吸', '无创呼吸机辅助通气', '婴儿氧舱治疗', '急救单独开舱治疗', '舱内抢救', '心电监测', '持续有创性血压监测', 
        '埋藏式心脏复律除颤器安置术', '心脏电复律术', '心脏电除颤术', '体外自动心脏变律除颤术', '心包穿刺术', '新生儿复苏', '新生儿气管插管术', 
        '新生儿监护', '新生儿辐射抢救治疗', '烧伤抢救(大）', '烧伤抢救(中）', '烧伤抢救(小）', '烧伤复合伤抢救', '电休克治疗', 
        '多参数监护无抽搐电休克治疗', '胰岛素低血糖和休克治疗', '心肺复苏术', '气管插管术', '颅内压监护传感器置入术', '环甲膜穿刺术', 
        '环甲膜切开术', '气管切开术', '急诊监护费(半日)', '体外半自动心脏变律除颤术', '新生儿单独心电监护', '新生儿心电、呼吸、血压监护', 
        '新生儿心电、呼吸、血压、氧饱和度监护', '新生儿单独呼吸监护'
    )
    # Resolve the patterns to item_j_code values via scene_item_ext.
    rescue_item_codes_conditions = " OR ".join(
        [f"item_code = '{escape_sql_literal(ptn)}' OR item_name = '{escape_sql_literal(ptn)}'" for ptn in rescue_item_ptns]
    )
    rescue_item_codes_subquery = f"SELECT DISTINCT item_j_code FROM scene_item_ext WHERE {rescue_item_codes_conditions}"
    with engine.connect() as conn:
        item_j_codes = [row[0] for row in conn.execute(text(rescue_item_codes_subquery)).fetchall()]

    # Build the IN clause; an empty list falls back to a never-matching literal.
    # NOTE(review): Oracle limits IN lists to 1000 entries — confirm the
    # resolved code list stays below that.
    if not item_j_codes:
        item_j_codes_in_clause = "('')"
    else:
        item_j_codes_escaped = [f"'{escape_sql_literal(code)}'" for code in item_j_codes]
        item_j_codes_in_clause = "(" + ", ".join(item_j_codes_escaped) + ")"

    with engine.connect() as conn:
        # Reset the flag for every case first.
        update_sql_default = f"""
            UPDATE {dip_case_table} target
            SET is_rescue = 0
        """
        conn.execute(text(update_sql_default))
        conn.commit()
        update_sql = f"""
            UPDATE {dip_case_table} target
            SET is_rescue = 1
            WHERE EXISTS (
                SELECT 1
                FROM d d
                WHERE d.setl_id = target.setl_id
                    AND d.item_j_code IN {item_j_codes_in_clause}
            )
        """
        # BUG FIX: this statement was previously built but never executed,
        # leaving is_rescue = 0 for every case.
        conn.execute(text(update_sql))
        conn.commit()
        print(f"[{elapsed()}]Updated {dip_case_table} table with is_rescue.")

    # --- 3. indexes (skip those that already exist) ---
    # setl_id already has a unique index from its PRIMARY KEY constraint, so
    # it is not created here.
    index_specs = [
        ("IDX_DIP_CASE_DIP_CODE", "dip_code"),
        ("IDX_DIP_CASE_TX_PATTERN", "tx_pattern"),
    ]
    with engine.connect() as conn:
        for idx_name, column in index_specs:
            exists = conn.execute(
                text("SELECT COUNT(*) FROM user_indexes WHERE index_name = :name"),
                {"name": idx_name}
            ).scalar()
            if exists:
                print(f"[{elapsed()}]Index {idx_name} already exists, skip creating.")
            else:
                conn.execute(text(f"CREATE INDEX {idx_name} ON {dip_case_table}({column})"))
                print(f"[{elapsed()}]Created index {idx_name}.")
            conn.commit()

# ------------------------------
#  主流程
# ------------------------------
def main():
    """Entry point: build the dip and dip_case tables end to end."""
    # Create the database engine.
    engine = create_db_engine()

    # Read the DIP library workbook.
    # NOTE(review): the module docstring calls this dipLibrary.xlsx, but the
    # file actually read is env.xlsx — confirm which name is canonical.
    dip_library_path = Path(ROOT_DIR) / 'env.xlsx'
    dip_library_df, tx_type_df = read_dip_library(dip_library_path)

    # Upload the surgery-code classification (dip_tx_class).
    upload_tx_class(engine, tx_type_df)

    # Pre-stage the setl_ids matching each dx / tx pattern.
    dx_cases_table_name = 'dip_dx_ptn_cases'
    tx_cases_table_name = 'dip_tx_ptn_cases'
    prepare_dx_pattern_cases(engine, dip_library_df, dx_cases_table_name)
    prepare_tx_pattern_cases(engine, dip_library_df, tx_cases_table_name)

    # Create the dip table.
    dip_table_name = 'dip'
    create_dip_table(engine, dip_table_name)

    # Run DIP grouping for every library group.
    dip_all(engine, dip_library_df, dip_table_name, dx_cases_table_name, tx_cases_table_name)
    print(f"[{elapsed()}] All done.")

    # Fill the dip_priority column.
    add_priority_column(engine, dip_table_name)

    # Create indexes on the dip table.
    create_indexes(engine, dip_table_name)

    # Create the dip_case table (priority-1 rows only).
    dip_case_table_name = 'dip_case'
    create_dip_case_table(engine, dip_case_table_name, dip_table_name)

    # Enrich dip_case with top operation item and rescue flag.
    update_dip_case_table(engine, dip_table_name, dip_case_table_name)

if __name__ == '__main__':
    main()