"""
飞检数据STEP8筛选 - summary模块
============================================================================

将 VLT2 表中的信息按照每个 hsp_abbr、每个 rule_id 做汇总统计。

主要功能：
1. VLT2 表按照 hsp_abbr、rule_id 分组，统计：
   - 结算ID数（去重 setl_id 数）
   - 总数量（sum(q)）
   - 总金额（sum(c)）
   - 总医保内金额（sum(b)）
   - 违规情形描述（来自 rule_flatten 中 key like '%info%' 的 value）
   - 依据（来自 rule_flatten 中 key like '%description%' 的 value）
   - 科室（来自 rule_flatten 中 key like '%dept%' 的 value）
   - 违规性质（来自 rule_flatten 中 key like '%nature%' 的 value）
   - 备注（来自 rule_flatten 中 key like '%note%' 的 value）
2. 输出到本目录下 summary/事前疑点汇总_内部_{hspAbbr}.xlsx
3. 导出使用率
    3.1. 从 scene_item 分不同的 hsp_abbr 导出，输出到本目录下 summary/项目使用率_{hspAbbr}.xlsx
    3.2. 从 scene_item_ext 分不同的 hsp_abbr 导出，输出到本目录下 summary/项目字典_{hspAbbr}.xlsx
4. 分不同的 hsp_abbr 生成不同的 pickup 文件，输出到本目录下 summary/病例挑选_内部_{hspAbbr}.xlsx

备注：VLT2 的生成过程参考 step8_2_conclude.py。
============================================================================
"""

import os
import re
import sys
import time
import pandas as pd
from sqlalchemy import text

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from config import create_db_engine

_ILLEGAL_CHAR_RE = re.compile(r"[\x00-\x08\x0b\x0c\x0e-\x1f]")


def _clean_excel_string(value):
    """去掉 Excel 不允许的控制字符。"""
    if not isinstance(value, str):
        return value
    return _ILLEGAL_CHAR_RE.sub("", value)

# Wall-clock reference captured at import time; elapsed() reports time since then.
t0 = time.time()


def elapsed() -> str:
    """Return 'HH:MM:SS (+ <delta>)' where <delta> uses the largest needed units."""
    now = time.strftime("%H:%M:%S", time.localtime())
    total = int(time.time() - t0)
    d, rem = divmod(total, 86400)
    h, rem = divmod(rem, 3600)
    m, s = divmod(rem, 60)
    if d:
        delta = f"{d} day {h} hour {m} min {s} sec"
    elif h:
        delta = f"{h} hour {m} min {s} sec"
    elif m:
        delta = f"{m} min {s} sec"
    else:
        delta = f"{s} sec"
    return f"{now} (+ {delta})"


def _fetch_rule_attr(engine, key_pattern):
    """Load a rule_id -> value mapping from rule_flatten.

    Selects rows whose key matches ``%<key_pattern>%``; when a rule_id has
    several matching rows, the first returned row wins. ``key_pattern`` is an
    internal constant (never user input), so interpolating it into the LIKE
    pattern is safe here.

    :param engine: SQLAlchemy engine/connection for the audit database.
    :param key_pattern: substring to match against rule_flatten.key.
    :return: dict mapping rule_id to the matched value.
    """
    query = text(
        f"""
        SELECT rule_id, value
        FROM rule_flatten
        WHERE key LIKE '%{key_pattern}%'
        """
    )
    df = pd.read_sql(query, engine)
    # If the same rule_id occurs multiple times, keep the first row.
    df = df.dropna(subset=["rule_id"]).drop_duplicates(subset=["rule_id"], keep="first")
    return pd.Series(df["value"].values, index=df["rule_id"].values).to_dict()


def fetch_rule_descriptions(engine):
    """Return rule_id -> description (rule_flatten keys LIKE '%description%')."""
    return _fetch_rule_attr(engine, "description")


def fetch_rule_natures(engine):
    """Return rule_id -> nature (rule_flatten keys LIKE '%nature%')."""
    return _fetch_rule_attr(engine, "nature")


def fetch_rule_depts(engine):
    """Return rule_id -> dept (rule_flatten keys LIKE '%dept%')."""
    return _fetch_rule_attr(engine, "dept")


def fetch_rule_notes(engine):
    """Return rule_id -> note (rule_flatten keys LIKE '%note%')."""
    return _fetch_rule_attr(engine, "note")


def fetch_rule_infos(engine):
    """Return rule_id -> info (rule_flatten keys LIKE '%info%')."""
    return _fetch_rule_attr(engine, "info")

def _sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """Coerce columns to Excel-safe primitive values (mutates df, also returns it).

    Datetime, categorical and object columns are flattened to strings with
    NaN/NaT rendered as "", then stripped of control characters Excel rejects.
    (Previous version repeated the identical conversion in three elif branches
    and used the Series alias ``.dtypes`` instead of ``.dtype``.)
    """
    for col in df.columns:
        series = df[col]
        # One conversion covers all three cases; after it the column dtype is object.
        if (
            pd.api.types.is_datetime64_any_dtype(series)
            or isinstance(series.dtype, pd.CategoricalDtype)
            or pd.api.types.is_object_dtype(series)
        ):
            df[col] = series.apply(lambda x: "" if pd.isna(x) else str(x))
        # Re-check dtype so freshly stringified columns are cleaned too.
        if pd.api.types.is_object_dtype(df[col]):
            df[col] = df[col].map(_clean_excel_string).replace({"nan": "", "NaT": ""})
    return df

# ============
# 主流程
# ============
def _prepare_listagg_helper_table(engine, hsp_abbr: str, table_name: str = "TMP_VLT2_LISTAGG") -> None:
    """Drop and recreate the helper table holding pre-computed LISTAGG results.

    The resulting table has one row per (hsp_abbr, rule_id) with an
    ``item_list`` column: the distinct violating item names/codes joined with
    '、'. The SQL accumulates LENGTHB per item and keeps only items whose
    running byte total stays <= 3900, appending '等' ("etc.") when any were
    dropped — presumably to stay under Oracle's 4000-byte VARCHAR2 limit for
    LISTAGG results (TODO confirm the 3900 headroom choice).

    :param engine: SQLAlchemy engine for the audit database.
    :param hsp_abbr: hospital abbreviation used to filter VLT rows.
    :param table_name: helper table name; uppercased to match user_tables.
    """
    table_name = table_name.upper()
    # Existence probe against the current schema's user_tables.
    query_check = text(
        """
        SELECT COUNT(1)
        FROM user_tables
        WHERE table_name = :table_name
        """
    )

    # NOTE(review): table_name / hsp_abbr are interpolated via f-strings because
    # Oracle DDL cannot take bind variables; both come from internal callers,
    # not user input — keep it that way.
    drop_sql = text(f"DROP TABLE {table_name}")

    create_sql = text(
        f"""
        CREATE TABLE {table_name} AS
        WITH TBL_ITEMLIST AS (
            SELECT DISTINCT vlt.hsp_abbr,
                vlt.rule_id,
                NVL(d.item_name, d.item_hsp_name) AS item_name,
                NVL(d.item_code, d.item_hsp_code) AS item_code
            FROM vlt, d
            WHERE vlt.hsp_abbr = '{hsp_abbr}'
                AND vlt.d0_rowid = d.d0_rowid
                AND vlt.q > 0
                AND vlt.c > 0
        )
        ,TBL_ITEMLIST_LEN AS (
            SELECT hsp_abbr,
                rule_id,
                item_name,
                item_code,
                LENGTHB(item_name || '(' || item_code || ')、') AS item_len
            FROM TBL_ITEMLIST
        )
        ,TBL_ITEMLIST_SUM_LEN AS (
            SELECT hsp_abbr,
                rule_id,
                SUM(item_len) AS sum_len
            FROM TBL_ITEMLIST_LEN
            GROUP BY hsp_abbr, rule_id
        )
        ,TBL_ITEMLIST_ACCU_LEN AS (
            SELECT hsp_abbr,
                rule_id,
                item_name,
                item_code,
                SUM(item_len) OVER (PARTITION BY hsp_abbr, rule_id ORDER BY item_len ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS accu_len
            FROM TBL_ITEMLIST_LEN
        )
        ,TBL_ITEMLIST_FINAL AS (
            SELECT al.hsp_abbr,
                al.rule_id,
                al.item_name,
                al.item_code,
                al.accu_len,
                sl.sum_len
            FROM TBL_ITEMLIST_ACCU_LEN al,
                TBL_ITEMLIST_SUM_LEN sl
            WHERE al.accu_len <= 3900
                AND al.hsp_abbr = sl.hsp_abbr
                AND al.rule_id = sl.rule_id
        )
        SELECT hsp_abbr,
            rule_id,
            listagg(DISTINCT t.item_name || '(' || t.item_code || ')', '、') WITHIN GROUP (ORDER BY t.item_name)
                || CASE WHEN MAX(sum_len) > MAX(accu_len) THEN '等' ELSE '' END AS item_list
        FROM TBL_ITEMLIST_FINAL t
        GROUP BY hsp_abbr, rule_id
        """
    )

    # Drop-if-exists then recreate inside one transactional scope
    # (Oracle DDL autocommits each statement regardless).
    with engine.begin() as conn:
        exists_count = conn.execute(query_check, {"table_name": table_name}).scalar()
        if exists_count and exists_count > 0:
            conn.execute(drop_sql)
        conn.execute(create_sql)
        print(f"[{elapsed()}] 辅助表 {table_name} 已创建")

def fetch_vlt2_summary(engine, hsp_abbr):
    """Aggregate VLT2 per (hsp_abbr, rule_id) for one hospital.

    Rebuilds the LISTAGG helper table first, then returns one row per rule
    with the item list, the distinct settlement count, and quantity/amount sums.
    """
    # The join below requires TMP_VLT2_LISTAGG to be freshly populated.
    _prepare_listagg_helper_table(engine, hsp_abbr)

    summary_stmt = text(
        """
            SELECT
                    vlt2.hsp_abbr, vlt2.rule_id,
                    MAX(t.item_list) AS item_list,
                    COUNT(DISTINCT setl_id) AS setl_cnt,
                    SUM(vlt_q) AS sum_q, SUM(vlt_c) AS sum_c, SUM(vlt_b) AS sum_b
            FROM vlt2
            JOIN TMP_VLT2_LISTAGG t ON vlt2.hsp_abbr = t.hsp_abbr AND vlt2.rule_id = t.rule_id
            WHERE vlt2.hsp_abbr = :hsp_abbr
            GROUP BY vlt2.hsp_abbr, vlt2.rule_id
            ORDER BY vlt2.hsp_abbr, vlt2.rule_id
        """
    )
    summary_df = pd.read_sql(summary_stmt, engine, params={"hsp_abbr": hsp_abbr})
    return summary_df


def write_summary_excel(df: pd.DataFrame, base_dir: str, hsp_abbr: str, engine=None) -> None:
    """Write the per-rule summary workbook summary/事前疑点汇总_内部_{hsp_abbr}.xlsx.

    ``df`` must carry the VLT2 aggregate columns (hsp_abbr, rule_id, item_list,
    setl_cnt, sum_q, sum_c, sum_b); rule attributes are mapped in from
    rule_flatten, unmatched rows stay blank.

    :param engine: optional SQLAlchemy engine; created on demand when omitted
        (previously a second engine was always created even though callers
        already hold one).
    """
    out_dir = os.path.join(base_dir, "summary")
    os.makedirs(out_dir, exist_ok=True)
    out_file = os.path.join(out_dir, f"事前疑点汇总_内部_{hsp_abbr}.xlsx")

    if engine is None:
        engine = create_db_engine()

    # Attach per-rule attributes.
    # NOTE(review): the module docstring says 描述 comes from '%info%' and 依据
    # from '%description%', but this (pre-existing) mapping is the reverse —
    # confirm which orientation is intended before changing either side.
    df['违规性质'] = df['rule_id'].map(fetch_rule_natures(engine))
    df['科室'] = df['rule_id'].map(fetch_rule_depts(engine))
    df['备注'] = df['rule_id'].map(fetch_rule_notes(engine))
    df['描述'] = df['rule_id'].map(fetch_rule_descriptions(engine))
    df['依据'] = df['rule_id'].map(fetch_rule_infos(engine))

    # Fixed column order, then Chinese report headers.
    cols = ['hsp_abbr', 'rule_id', '违规性质', '科室', '描述', 'item_list', 'setl_cnt', 'sum_q', 'sum_c', 'sum_b', '依据', '备注']
    df = df[cols]
    df.columns = ['医院简称', '规则编号', '违规性质', '科室', '描述', '违规项目', '病例数', '总数量', '总金额', '总医保内金额', '依据', '备注']
    df = _sanitize_dataframe(df)

    df.to_excel(out_file, index=False)
    print(f"[{elapsed()}] 写出文件: {out_file}")

def write_usage_excel(base_dir: str, hsp_abbr: str, engine=None) -> None:
    """Write summary/项目使用率_{hsp_abbr}.xlsx: usage-rate sheet plus a notes sheet.

    Pulls non-bottom rows from scene_item for one hospital, translates the
    medical-category code to its display name via v_setl_map, and drops the
    执行科室 column when it is entirely empty.

    :param engine: optional SQLAlchemy engine; created on demand when omitted.
    """
    out_dir = os.path.join(base_dir, "summary")
    os.makedirs(out_dir, exist_ok=True)
    out_file = os.path.join(out_dir, f"项目使用率_{hsp_abbr}.xlsx")

    if engine is None:
        engine = create_db_engine()

    # Main usage-rate query (is_bottom = '0' excludes roll-up rows — TODO confirm).
    query = text(
        """
        SELECT
            j_year as 结算年份,
            med_type as 医疗类别编码,
            decode(j_has_b, '1', '有使用基金', '0', '整单自费', j_has_b) as 整单自费标识,
            item_u_code as 医院项目编码, item_hsp_name as 医院项目名称,
            item_code as 国家项目编码, item_name as 国家项目名称,
            item_type as 项目类别,
            item_depts as 开单科室,
            apply_depts as 执行科室,
            item_time_earliest as 最早使用日期,
            item_time_latest as 最晚使用日期,
            sum_q as 使用数量,
            sum_c as 总金额,
            sum_b as 总医保内金额,
            p as 单价,
            used as 使用人次数,
            usage as 使用率,
            bz as 医保备注
        FROM scene_item
        WHERE hsp_abbr = :hsp_abbr
            AND is_bottom = '0'
        """
    )
    df = pd.read_sql(query, engine, params={"hsp_abbr": hsp_abbr})

    # Translate 医疗类别编码 -> 医疗类别; unknown codes fall back to the raw code.
    med_type_map_sql = text('SELECT DISTINCT 医疗类别, 医疗类别编码 FROM v_setl_map')
    med_type_map_df = pd.read_sql(med_type_map_sql, engine)
    med_type_map = pd.Series(med_type_map_df['医疗类别'].values, index=med_type_map_df['医疗类别编码'].values).to_dict()
    df['医疗类别'] = df['医疗类别编码'].apply(lambda x: med_type_map.get(x, x))
    df = df.drop(columns=['医疗类别编码'])

    # Final column order (this list was previously applied twice back-to-back).
    df = df[[
        '结算年份', '医疗类别', '整单自费标识', '医院项目编码', '医院项目名称',
        '国家项目编码', '国家项目名称', '项目类别',
        '开单科室', '执行科室', '最早使用日期', '最晚使用日期', '使用数量', '总金额', '总医保内金额',
        '单价', '使用人次数', '使用率', '医保备注'
    ]]

    df = _sanitize_dataframe(df)

    # Drop 执行科室 entirely when it carries no data (all NaN, or all "" after sanitizing).
    if df['执行科室'].isnull().all() or (df['执行科室'] == '').all():
        df = df.drop(columns=['执行科室'])

    # Notes sheet: header cell is `head`, one row per entry
    # (previously built with pd.concat in a loop — quadratic and noisy).
    head = '注：'
    contents = [
        "1.项目使用率=出院患者使用该项目总人次数÷同期出院总人数×100%。",
        "2.统计顺序依次为：",
        "（1）以结算年度为第一分类；",
        "（2）项目费用类别：医院编码、单价相同的合并为一项，如医院编码为空或乱码的，以国家项目编码、单价合并为一项；",
        "（3）开单科室：合并为一项的项目费用类别所涉及的开单科室合并，用逗号分开；",
        "（4）项目时间：合并为一项的项目费用类别的费用发生最早时间与最晚时间。",
    ]
    note_df = pd.DataFrame({head: contents})

    with pd.ExcelWriter(out_file, engine='openpyxl') as writer:
        df.to_excel(writer, sheet_name='项目使用率', index=False)
        note_df.to_excel(writer, sheet_name='注', index=False)
    print(f"[{elapsed()}] 写出文件: {out_file}")

def update_sie(engine, hsp_abbr):
    """Refresh scene_item_ext.TRIGGER_INFO for one hospital.

    The column is dropped (if present) and re-added, then populated via a
    MERGE: for each item_j_code that triggered a rule (rule_apply), the
    rounded violated quantity from VLT is attached as
    'rule_id:qty|rule_id:qty|...'; items that triggered but had no violations
    get qty 0.
    """
    # Does TRIGGER_INFO already exist on SCENE_ITEM_EXT?
    query_check_column = text(
        """
            SELECT COUNT(*) AS cnt
            FROM user_tab_columns
            WHERE table_name = 'SCENE_ITEM_EXT' AND column_name = 'TRIGGER_INFO'
        """
    )
    with engine.begin() as conn:
        cnt = conn.execute(query_check_column).scalar()
        if cnt > 0:
            # Drop the stale column so the re-add below starts clean.
            query_drop_column = text(
                """
                    ALTER TABLE SCENE_ITEM_EXT DROP COLUMN TRIGGER_INFO
                """
            )
            conn.execute(query_drop_column)

    # Re-add the column.
    query_add_column = text(
        """
            ALTER TABLE SCENE_ITEM_EXT ADD TRIGGER_INFO VARCHAR2(4000)
        """
    )
    with engine.begin() as conn:
        conn.execute(query_add_column)

    # Aggregate trigger info per item_j_code and merge it in.
    # (Removed a stray f-prefix here: the statement uses :hsp_abbr binds,
    # not Python interpolation, so the f-string was misleading.)
    query_update_trigger_info = text(
        """
            MERGE INTO scene_item_ext sie
            USING (
                WITH T0 AS (
                    -- 统计每个 item_j_code 的触发规则
                    SELECT DISTINCT ra.rule_id, ra.value as item_j_code
                    FROM rule_apply ra
                    WHERE ra.hsp_abbr = :hsp_abbr
                            AND ra.KEY like '%.trigger_apply#%.j_code%'
                )
                ,T1 AS (
                    SELECT vlt.item_j_code, vlt.RULE_ID, sum(vlt_q) as sum_vlt_q
                    FROM vlt
                    WHERE vlt.hsp_abbr = :hsp_abbr
                    GROUP BY vlt.item_j_code, vlt.RULE_ID
                )
                ,T2 AS (
                    SELECT t0.item_j_code, t0.rule_id, nvl(t1.sum_vlt_q, 0) as sum_vlt_q
                    FROM t0 LEFT OUTER JOIN t1 ON t0.item_j_code = t1.item_j_code and t0.rule_id = t1.rule_id
                )
                SELECT
                    t2.item_j_code, 
                    listagg(distinct rule_id || ':' || round(sum_vlt_q, 1), '|') as trigger_info
                FROM T2
                GROUP BY T2.item_j_code
            ) ti
            ON (sie.hsp_abbr = :hsp_abbr AND sie.item_j_code = ti.item_j_code)
            WHEN MATCHED THEN
                UPDATE SET sie.trigger_info = ti.trigger_info
        """
    )
    with engine.begin() as conn:
        conn.execute(query_update_trigger_info, {"hsp_abbr": hsp_abbr})

def write_sie_excel(base_dir: str, hsp_abbr: str, engine=None) -> None:
    """Dump one hospital's scene_item_ext rows to summary/scene_item_ext_{hsp_abbr}.xlsx.

    :param engine: optional SQLAlchemy engine; created on demand when omitted.
    """
    out_dir = os.path.join(base_dir, "summary")
    os.makedirs(out_dir, exist_ok=True)
    out_file = os.path.join(out_dir, f"scene_item_ext_{hsp_abbr}.xlsx")

    if engine is None:
        engine = create_db_engine()

    # (Removed a stray f-prefix: the statement uses a :hsp_abbr bind.)
    query = text(
        """
        SELECT *
        FROM scene_item_ext
        WHERE hsp_abbr = :hsp_abbr
        """
    )
    df = pd.read_sql(query, engine, params={"hsp_abbr": hsp_abbr})
    df = _sanitize_dataframe(df)

    df.to_excel(out_file, index=False)
    print(f"[{elapsed()}] 写出文件: {out_file}")

def write_pickup_excel(base_dir: str, hsp_abbr: str, engine=None) -> None:
    """Write summary/病例挑选_内部_{hsp_abbr}.xlsx: up to 5 sample cases per rule.

    Cases are ranked per rule by j_rand2 (a completeness bonus + inpatient
    flag + random component), and the top 5 are exported.

    :param engine: optional SQLAlchemy engine; created on demand when omitted.
    """
    out_dir = os.path.join(base_dir, "summary")
    os.makedirs(out_dir, exist_ok=True)
    out_file = os.path.join(out_dir, f"病例挑选_内部_{hsp_abbr}.xlsx")

    if engine is None:
        engine = create_db_engine()

    # BUG FIX: this statement previously interpolated '{hsp_abbr}' via an
    # f-string while ALSO passing params={"hsp_abbr": ...} that nothing bound.
    # Now it uses a proper :hsp_abbr bind, consistent with the other queries.
    query = text(
        """
        with t0 as (
            select distinct
                vlt2.rule_id, m.setl_id, m.mdtrt_id, m.in_out_id, m.certno, m.psn_name, m.in_day, m.out_day, m.j_isin, 
                (case 
                    when m.mdtrt_id is not null and m.in_out_id is not null and m.certno is not null and m.psn_name is not null
                    and m.in_day is not null and m.out_day is not null then 1 else 0 end) + m.j_isin + m.j_rand as j_rand2
            from vlt2, m
            where vlt2.hsp_abbr = :hsp_abbr
                and vlt2.setl_id = m.setl_id
        )
        ,t1 as (
            select t0.*,
                row_number() over(partition by rule_id order by j_rand2 desc) as r
            from t0
        )
        select 
            rule_id as 规则ID, setl_id as 结算ID, mdtrt_id as 就诊ID, in_out_id as 门诊住院号, 
            certno as 证件号码, psn_name as 姓名, in_day as 入院日期, out_day as 出院日期,
            decode(j_isin,1,'住院',0,'门诊') as 住院标识
        from t1
        where t1.r<=5
        order by rule_id, r
        """
    )
    df = pd.read_sql(query, engine, params={"hsp_abbr": hsp_abbr})
    df = _sanitize_dataframe(df)

    df.to_excel(out_file, index=False)
    print(f"[{elapsed()}] 写出文件: {out_file}")

def main():
    """Entry point: summarize VLT2 per hospital and export all Excel reports."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    print(f"[{elapsed()}] 启动 VLT2 汇总 ...")

    engine = create_db_engine()

    # All hospital abbreviations present in VLT2.
    with engine.connect() as conn:
        hsp_abbrs = pd.read_sql(text("SELECT DISTINCT hsp_abbr FROM vlt2"), conn)["hsp_abbr"].tolist()

    for hsp_abbr in hsp_abbrs:
        print(f"[{elapsed()}] 处理医院: {hsp_abbr} ...")
        try:
            sum_df = fetch_vlt2_summary(engine, hsp_abbr)
        except Exception as e:
            print(f"[{elapsed()}] 读取 VLT2 失败，可能表不存在或没有数据。请先运行 step8_2_conclude.py 生成 VLT2。\n错误: {e}")
            raise

        if sum_df.empty:
            # BUG FIX: was `return`, which aborted ALL remaining hospitals when
            # one hospital had an empty summary; skip just this hospital.
            print(f"[{elapsed()}] VLT2 汇总为空，无需输出。")
            continue

        # Map rule descriptions in as 依据.
        # (NOTE(review): write_summary_excel overwrites this column from
        # '%info%' rows — kept for backward compatibility; confirm intent.)
        rule_desc = fetch_rule_descriptions(engine)
        sum_df["依据"] = sum_df["rule_id"].map(rule_desc).fillna("")

        # Normalize column set/order; backfill anything missing with None.
        cols = ["hsp_abbr", "rule_id", "item_list", "setl_cnt", "sum_q", "sum_c", "sum_b", "依据"]
        for c in cols:
            if c not in sum_df.columns:
                sum_df[c] = None
        sum_df = sum_df[cols]

        # Keep numeric columns numeric; bad values become NaN instead of mixed types.
        for col in ["sum_q", "sum_c", "sum_b"]:
            if col in sum_df.columns:
                sum_df[col] = pd.to_numeric(sum_df[col], errors="coerce")

        write_summary_excel(sum_df, base_dir, hsp_abbr)
        write_usage_excel(base_dir, hsp_abbr)
        update_sie(engine, hsp_abbr)
        write_sie_excel(base_dir, hsp_abbr)
        write_pickup_excel(base_dir, hsp_abbr)
        print(f"[{elapsed()}] {hsp_abbr} 汇总完成。")


if __name__ == "__main__":
    main()
