"""
飞检数据STEP8筛选 - clue_group_chemic.py
============================================================================

分为两步：
1. 解包 
    1.1. 从 clue867_library.xlsx 中获得化验项目的pattern
    1.2. 从 scene_item_ext 中获得各个医院的项目列表
    1.3. 解包对应
2. 查 d 表，是否各个 item_pattern 的使用率都大于0.3，且全部 item_pattern 同时使用的占 item_pattern 任意一个使用的比例大于 0.9

============================================================================
"""
import json
import pandas as pd
from jbar import bar
from sqlalchemy import create_engine, text
import re
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# 读取 .env 文件
import dotenv
from config import create_db_engine
# 创建数据库引擎
engine = create_db_engine()
dotenv.load_dotenv()
# 从 common.rule_flatten_handle 中 导入 flatten_json, unflatten_json
from common.rule_flatten_handle import flatten_json, unflatten_json
import time

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from config import create_db_engine

from sqlalchemy.engine import Engine


# Control characters that Excel refuses to store (everything below 0x20
# except tab, LF and CR).
_ILLEGAL_CHAR_RE = re.compile(r"[\x00-\x08\x0b\x0c\x0e-\x1f]")

def clean_illegal_chars(s: str) -> str:
    """Strip illegal control characters from *s*; NaN/None passes through unchanged."""
    return s if pd.isna(s) else _ILLEGAL_CHAR_RE.sub("", s)

# Timing helpers: wall-clock reference taken at module import.
t0 = time.time()

def elapsed() -> str:
    """Return the current local time plus a human-readable delta since module start.

    Format examples: "12:00:05 (+ 5 sec)", "12:03:05 (+ 3 min 5 sec)".
    """
    now = time.strftime("%H:%M:%S", time.localtime())
    total = int(time.time() - t0)
    days, rem = divmod(total, 86400)
    hours, rem = divmod(rem, 3600)
    minutes, seconds = divmod(rem, 60)
    # Emit only the leading units that are non-zero, largest first.
    if days:
        delta = f"{days} day {hours} hour {minutes} min {seconds} sec"
    elif hours:
        delta = f"{hours} hour {minutes} min {seconds} sec"
    elif minutes:
        delta = f"{minutes} min {seconds} sec"
    else:
        delta = f"{seconds} sec"
    return f"{now} (+ {delta})"
    
# Create the shared database engine.
# NOTE(review): this duplicates the create_db_engine() call near the top of
# the file; the name `engine` is simply rebound here — confirm a single call
# (placed after dotenv.load_dotenv()) would suffice.
engine = create_db_engine()
def load_hsp_list(json_path: str) -> list:
    """Read and return the hospital list stored as JSON at *json_path*."""
    with open(json_path, encoding='utf-8') as fp:
        return json.load(fp)

# ============
# 解包过程
# ============

# Read the lab-test ("chemic") patterns from the clue library workbook.
def load_chemic_patterns(excel_path: str) -> list:
    """Load chemic pattern groups from an Excel file.

    Consecutive rows sharing a group_id are folded into one entry of the form
    {'group_id', 'group_name', 'item_ptn_data': [{'item_pattern', 'is_vlt'}, ...]}.
    """
    df_patterns = pd.read_excel(excel_path, dtype=str)
    df_patterns['item_pattern'] = df_patterns['item_pattern'].apply(clean_illegal_chars)
    patterns_data = []
    prev_group_id = None
    for row in df_patterns.to_dict(orient='records'):
        entry = {
            'item_pattern': row['item_pattern'],
            # dtype=str means '1' normally, but accept an int 1 defensively
            'is_vlt': row['is_vlt'] in ('1', 1),
        }
        if row['group_id'] == prev_group_id:
            # Same group as the previous row: extend its pattern list.
            patterns_data[-1]['item_ptn_data'].append(entry)
        else:
            # New group: open a fresh record.
            patterns_data.append({
                'group_id': row['group_id'],
                'group_name': row['group_name'],
                'item_ptn_data': [entry],
            })
            prev_group_id = row['group_id']
    return patterns_data

# Read the per-hospital item list.
def load_item_list(engine: Engine, hsp_abbr: str) -> pd.DataFrame:
    """Fetch one hospital's distinct item rows from scene_item_ext.

    Columns: hsp_abbr, item_j_code, item_code, item_name, item_med_code,
    item_hsp_code, item_hsp_name, item_type.
    """
    sql = text("""
        SELECT distinct hsp_abbr, item_j_code, item_code, item_name, item_med_code, item_hsp_code, item_hsp_name, item_type
        FROM scene_item_ext
        WHERE hsp_abbr = :hsp_abbr
    """)
    with engine.connect() as conn:
        result = conn.execute(sql, {"hsp_abbr": hsp_abbr})
        # BUG FIX: fetch while the connection is still open — reading the
        # result after the `with` block closes the connection raises
        # ResourceClosedError in SQLAlchemy.
        dfSceneItem = pd.DataFrame(result.fetchall(), columns=result.keys())
    return dfSceneItem

# 生成正则表达式，传入 sql 的 LIKE 模式列表，返回正则表达式字符串
def _build_pattern_re(rawList):
    patterns = []
    for x in rawList:
        if not x:
            continue
        # 将%和_转换为正则
        pattern = re.escape(x).replace(r'%', '.*').replace(r'_', '.')
        # 添加完整匹配边界
        pattern = '^' + pattern + '$'
        patterns.append(pattern)
    return '|'.join(patterns) if patterns else '^$'  # 如果patterns为空，返回永不匹配的模式

# Unpack the matching items for one item pattern string.
def _unpack_single_item_ptn(item_pattern: str, df_items: pd.DataFrame) -> list:
    """Unpack a single item pattern into matching hospital items.

    Args:
        item_pattern: comma-separated SQL LIKE patterns (the original
            annotation said ``dict``, but a string is passed — ``.split`` is
            called on it).
        df_items: the hospital's item list from ``load_item_list``.

    Returns:
        List of dicts in the downstream "unpacked item" record shape.
    """
    # Convert the LIKE patterns into one anchored, case-insensitive regex.
    raw_list = item_pattern.split(',')
    pattern_re = re.compile(_build_pattern_re(raw_list), re.IGNORECASE)
    # Match against every identifying column.  na=False keeps NULL cells from
    # producing NA entries in the mask — boolean indexing would otherwise
    # fail on columns containing missing values.
    mask = (
        df_items['item_code'].str.match(pattern_re, na=False) |
        df_items['item_name'].str.match(pattern_re, na=False) |
        df_items['item_hsp_code'].str.match(pattern_re, na=False) |
        df_items['item_hsp_name'].str.match(pattern_re, na=False) |
        df_items['item_med_code'].str.match(pattern_re, na=False)
    )
    # Project the matched rows into the record shape used downstream.
    unpacked_data = []
    for _, row in df_items[mask].iterrows():
        unpacked_data.append({
            'j_code': row['item_j_code'],
            'j_name': None,
            'codes': row['item_code'],
            'names': row['item_name'],
            'med_codes': row['item_med_code'],
            'hsp_codes': row['item_hsp_code'],
            'hsp_names': row['item_hsp_name']
        })
    return unpacked_data

# Unpack the lab-test items for every pattern group.
def unpack_chemic_items(patterns_data: list, df_items: pd.DataFrame) -> list:
    """Attach the matching hospital items to every pattern of every group.

    NOTE: ``patterns_data.copy()`` is a *shallow* copy — the group/pattern
    dicts are shared with the input, so each pattern's 'unpacked_items' key is
    (re)written in place.  This is relied upon: the same pattern list is
    reused for each hospital and simply overwritten per run.
    """
    unpacked_data = patterns_data.copy()
    for patterns_datum in unpacked_data:
        for item_ptn_datum in patterns_datum['item_ptn_data']:
            item_ptn_datum['unpacked_items'] = _unpack_single_item_ptn(
                item_ptn_datum['item_pattern'], df_items
            )
    return unpacked_data

# ============
# 查 d 表使用率过程
# ============
def _construct_in_clause(item_j_codes: list, tbl_abbr='d.') -> str:
    """Construct SQL IN clause for a list of item_j_codes."""
    if not item_j_codes:
        return "('')"  # 返回一个永不匹配的IN子句
    # 将 item_j_codes 分批，每批不超过1000个
    batch_size = 1000
    batches = [item_j_codes[i:i + batch_size] for i in range(0, len(item_j_codes), batch_size)]
    in_clauses = []
    for batch in batches:
        codes_str = ','.join(f"'{code}'" for code in batch)
        in_clauses.append(f"({codes_str})")
    return ' OR '.join([f"{tbl_abbr}item_j_code IN {clause}" for clause in in_clauses])

def _check_total_case_count(engine: Engine, hsp_abbr: str) -> int:
    """Count the hospital's inpatient (j_isin = '1') cases in table m."""
    sql = text("""
        SELECT COUNT(*) AS total_cases
        FROM m
        WHERE hsp_abbr = :hsp_abbr AND j_isin = '1'
    """)
    with engine.connect() as conn:
        first_row = conn.execute(sql, {"hsp_abbr": hsp_abbr}).fetchone()
    return first_row[0] if first_row else 0

def _check_single_item_ptn_usage(engine: Engine, hsp_abbr: str, item_ptn_datum: dict, total_case_count: int) -> dict:
    """Check usage statistics of one item pattern against the d table.

    Args:
        engine: database engine.
        hsp_abbr: hospital abbreviation.
        item_ptn_datum: pattern dict carrying an 'unpacked_items' list.
        total_case_count: total inpatient case count (the denominator).

    Returns:
        Dict with used_cases (distinct settlements using any matched item),
        usage (used_cases / total_case_count) and the SUMmed q/c/b columns.
        The sums may be None when the DB returns NULL; callers handle that.
    """
    if total_case_count == 0:
        return {'used_cases': 0, 'usage': 0.0, 'sum_q': 0, 'sum_c': 0, 'sum_b': 0}
    unpacked_items = item_ptn_datum['unpacked_items']
    if not unpacked_items:
        return {'used_cases': 0, 'usage': 0.0, 'sum_q': 0, 'sum_c': 0, 'sum_b': 0}
    # Build the batched IN condition over the pattern's matched item_j_codes.
    item_j_codes = [item['j_code'] for item in unpacked_items]
    in_clause = _construct_in_clause(item_j_codes)
    # Query usage counts and amount sums in one pass.
    sql = text(f"""
        SELECT COUNT(DISTINCT setl_id) AS used_cases, SUM(q) as sum_q, sum(c) as sum_c, sum(b) as sum_b
        FROM d
        WHERE hsp_abbr = :hsp_abbr AND j_isin = '1' AND ({in_clause})
    """)
    with engine.connect() as conn:
        row = conn.execute(sql, {"hsp_abbr": hsp_abbr}).fetchone()
    if not row:
        return {'used_cases': 0, 'usage': 0.0, 'sum_q': 0, 'sum_c': 0, 'sum_b': 0}
    used_cases = row[0]
    # BUG FIX: the original line read
    #     sum_q, sum_c, sum_b = row[1], row[2], row[3] if row else (0, 0, 0)
    # where the conditional bound only to row[3] (and row[1] would raise a
    # TypeError when row is None).  Guarded above; unpack the row directly.
    sum_q, sum_c, sum_b = row[1], row[2], row[3]
    return {
        'used_cases': used_cases,
        'usage': used_cases / total_case_count,
        'sum_q': sum_q, 'sum_c': sum_c, 'sum_b': sum_b,
    }

def _check_any_in_group_usage(engine: Engine, hsp_abbr: str, patterns_datum: list, total_case_count: int) -> float:
    """Return the share of inpatient cases that used ANY item of the group.

    FIX: the return annotation was ``-> dict`` but a plain float rate is
    returned on every path.

    Args:
        patterns_datum: the group's item_ptn_data list (each element carries
            'unpacked_items').
    """
    if total_case_count == 0:
        return 0.0
    # Union of every unpacked item_j_code across the group's patterns.
    all_j_codes_set = set()
    for item_ptn_datum in patterns_datum:
        for item in item_ptn_datum['unpacked_items']:
            all_j_codes_set.add(item['j_code'])
    all_j_codes = list(all_j_codes_set)
    if not all_j_codes:
        return 0.0
    in_clause = _construct_in_clause(all_j_codes)
    # Count settlements that used at least one of these items.
    sql = text(f"""
        SELECT COUNT(DISTINCT setl_id) AS used_cases
        FROM d
        WHERE hsp_abbr = :hsp_abbr AND j_isin = '1' AND ({in_clause})
    """)
    with engine.connect() as conn:
        row = conn.execute(sql, {"hsp_abbr": hsp_abbr}).fetchone()
    used_cases = row[0] if row else 0
    return used_cases / total_case_count

def _check_all_in_group_usage(engine: Engine, hsp_abbr: str, patterns_datum: list, total_case_count: int) -> float:
    """Return the share of inpatient cases that used EVERY pattern of the group.

    Oracle-specific implementation: settlements matching the first pattern are
    staged in a global temporary table, then settlements missing any of the
    remaining patterns are deleted step by step; whatever survives used all of
    them.  NOTE(review): ``patterns_datum`` is the group's item_ptn_data list
    (each element carries 'unpacked_items'), despite the generic name.
    """
    if total_case_count == 0:
        return 0.0
    # Collect each pattern's unpacked item_j_code list; patterns with no
    # matched items contribute nothing and are skipped.
    item_j_code_lists = []
    for item_ptn_datum in patterns_datum:
        unpacked_items = item_ptn_datum['unpacked_items']
        item_j_codes = [item['j_code'] for item in unpacked_items]
        if item_j_codes:
            item_j_code_lists.append(item_j_codes)
    if not item_j_code_lists:
        return 0.0
    # Track candidate settlements in a global temporary table; those that do
    # not use every pattern are removed one pattern at a time.
    with engine.begin() as conn:
        # Drop a leftover temp table from a previous run, swallowing the
        # error if it does not exist (Oracle PL/SQL block).
        conn.execute(text(f"""
            BEGIN
                EXECUTE IMMEDIATE 'DROP TABLE temp_item_usage';
            EXCEPTION
                WHEN OTHERS THEN
                    NULL;
            END;
        """))
        # Create the temporary table (rows vanish on commit).
        conn.execute(text("""
            CREATE GLOBAL TEMPORARY TABLE temp_item_usage (
                setl_id VARCHAR2(50),
                item_j_code VARCHAR2(50)
            ) ON COMMIT DELETE ROWS
        """))
        # Seed with the settlements that used the FIRST pattern's items.
        first_item_j_codes = item_j_code_lists[0]
        in_clause = _construct_in_clause(first_item_j_codes)
        insert_sql = text(f"""
            INSERT INTO temp_item_usage (setl_id, item_j_code)
            SELECT DISTINCT setl_id, item_j_code
            FROM d
            WHERE hsp_abbr = :hsp_abbr AND j_isin = '1' AND ({in_clause})
        """)
        conn.execute(insert_sql, {"hsp_abbr": hsp_abbr})
        # For each remaining pattern, delete the settlements that did not
        # also use that pattern's items.
        for item_j_codes in item_j_code_lists[1:]:
            in_clause = _construct_in_clause(item_j_codes, tbl_abbr='temp_item_usage.')
            delete_sql = text(f"""
                DELETE FROM temp_item_usage
                WHERE setl_id NOT IN (
                    SELECT d.setl_id FROM temp_item_usage, d
                    WHERE d.hsp_abbr = :hsp_abbr AND d.j_isin = '1' AND ({in_clause}) AND temp_item_usage.setl_id = d.setl_id
                )
            """)
            conn.execute(delete_sql, {"hsp_abbr": hsp_abbr})
        # Count the surviving settlements — those that used every pattern.
        count_sql = text("""
            SELECT COUNT(DISTINCT setl_id) AS remaining_count
            FROM temp_item_usage
        """)
        result = conn.execute(count_sql)
        row = result.fetchone()
        remaining_count = row[0] if row else 0
        # Convert to a rate over all inpatient cases.
        usage_rate = remaining_count / total_case_count if total_case_count > 0 else 0
        # Clean up the temporary table.
        conn.execute(text("DROP TABLE temp_item_usage"))
        return usage_rate

def _handle_one_group(engine: Engine, hsp_abbr: str, patterns_datum: dict, total_case_count: int) -> dict:
    """Evaluate one pattern group: per-pattern usage plus group-level rates.

    A group is flagged as violating when every pattern's usage rate exceeds
    0.3 and the all-vs-any synchronous usage ratio exceeds 0.9.
    """
    item_ptn_data = patterns_datum['item_ptn_data']
    group_result = {
        'group_id': patterns_datum['group_id'],
        'group_name': patterns_datum['group_name'],
        'item_patterns': [],
        'all_in_group_usage_rate': 0.0,
        'any_in_group_usage_rate': 0.0,
    }
    # Per-pattern usage statistics (NULL sums coerced to 0.0).
    for item_ptn_datum in item_ptn_data:
        stats = _check_single_item_ptn_usage(engine, hsp_abbr, item_ptn_datum, total_case_count)
        group_result['item_patterns'].append({
            'item_pattern': item_ptn_datum['item_pattern'],
            'usage_rate': stats['usage'],
            'sum_q': float(stats['sum_q']) if stats['sum_q'] is not None else 0.0,
            'sum_c': float(stats['sum_c']) if stats['sum_c'] is not None else 0.0,
            'sum_b': float(stats['sum_b']) if stats['sum_b'] is not None else 0.0,
            'is_vlt': item_ptn_datum['is_vlt'],
        })
    # Group-level rates: all patterns together vs. any pattern at all.
    all_rate = _check_all_in_group_usage(engine, hsp_abbr, item_ptn_data, total_case_count)
    any_rate = _check_any_in_group_usage(engine, hsp_abbr, item_ptn_data, total_case_count)
    group_result['all_in_group_usage_rate'] = all_rate
    group_result['any_in_group_usage_rate'] = any_rate
    # Share of the "any" cases in which every pattern co-occurred.
    group_result['sync_usage_ratio'] = all_rate / any_rate if any_rate > 0 else 0.0
    # Violation rule: all patterns individually common (>0.3) and nearly
    # always used together (>0.9 synchronous ratio).
    every_pattern_common = all(
        ptn['usage_rate'] > 0.3 for ptn in group_result['item_patterns']
    )
    group_result['is_violate'] = every_pattern_common and group_result['sync_usage_ratio'] > 0.9
    return group_result

def handle_chemic_usage(engine: Engine, hsp_abbr: str, unpacked_data: list) -> list:
    """Run the usage checks for every group and return only the violating ones."""
    total_case_count = _check_total_case_count(engine, hsp_abbr)
    violating_groups = []
    for patterns_datum in unpacked_data:
        group_result = _handle_one_group(engine, hsp_abbr, patterns_datum, total_case_count)
        marker = '!' if group_result['is_violate'] else '~'
        print(f"[{elapsed()}] 化验项目组：{group_result['group_id']} - {group_result['group_name']} - {marker}")
        if group_result['is_violate']:
            violating_groups.append(group_result)
    return violating_groups

def output(usage_results: list, hsp_abbr: str):
    """Write the violating groups of one hospital to an Excel clue file.

    Output path: STEP8筛选/clue/{hsp_abbr}/clue867_chemic_usage_{hsp_abbr}.xlsx
    """
    output_rows = []
    for group_result in usage_results:
        group_id, group_name = group_result['group_id'], group_result['group_name']
        any_in_group_usage_rate = group_result['any_in_group_usage_rate']
        sync_usage_ratio = group_result['sync_usage_ratio']
        msg_prefix = '按照广东省卫生健康委办公室关于印发《不合理检查检验项目或组包常见问题清单（第二批）》的通知，疑似有组套线索：'
        # Accumulate totals and message fragments over the patterns flagged
        # as violating (is_vlt).
        msg_vlt = ''
        total_sum_q, total_sum_c, total_sum_b = 0.0, 0.0, 0.0
        for item_ptn_result in group_result['item_patterns']:
            if not item_ptn_result['is_vlt']:
                continue
            item_pattern = item_ptn_result['item_pattern']
            # Drop a leading SQL wildcard so the message reads naturally.
            if item_pattern.startswith('%'):
                item_pattern = item_pattern[1:]
            sum_q, sum_c, sum_b = item_ptn_result['sum_q'], item_ptn_result['sum_c'], item_ptn_result['sum_b']
            sum_q = round(sum_q, 2) if sum_q is not None else 0.0
            sum_c = round(sum_c, 2) if sum_c is not None else 0.0
            sum_b = round(sum_b, 2) if sum_b is not None else 0.0
            total_sum_q += sum_q
            total_sum_c += sum_c
            total_sum_b += sum_b
            msg_vlt += f"其中可能不必要的组套化验：[{item_pattern}]，总数量{sum_q}，总金额{sum_c}元，总医保内金额={sum_b}；"
        msg_vlt = msg_vlt.rstrip('；') + '。'
        msg = f"{msg_prefix} {group_name}（即文件中的第{group_id}组情形），"
        msg += f"这些化验项目出现的住院病例占全部住院病例的使用率为{any_in_group_usage_rate:.2%}，"
        msg += f"且一旦组套中的某一项出现，其他项也几乎全部出现（组套内各项同步出现的比例为{sync_usage_ratio:.2%}）。"
        msg += msg_vlt
        output_rows.append({
            "组名": group_name,
            "组ID": group_id,
            "线索违规数量": total_sum_q,
            "线索违规金额": total_sum_c,
            "线索违规医保内金额": total_sum_b,
            # NOTE(review): the quotation marks in this citation look
            # mismatched ('>' closing a '《') — confirm the intended text.
            "依据": "广东省卫生健康委办公室《关于印发《不合理检查检验项目或组包常见问题清单（第二批）>的通知》",
            "违规信息": msg
        })
    df_output = pd.DataFrame(output_rows)
    # makedirs creates intermediate directories, so one call covers the whole
    # tree (the original made a redundant call for the parent first).
    output_dir = fr'STEP8筛选/clue/{hsp_abbr}'
    os.makedirs(output_dir, exist_ok=True)
    output_path = fr'STEP8筛选/clue/{hsp_abbr}/clue867_chemic_usage_{hsp_abbr}.xlsx'
    df_output.to_excel(output_path, index=False)

# Entry point: iterate the hospitals, unpack patterns, run the usage checks
# and export the violating groups per hospital.
if __name__ == "__main__":
    hsp_list = load_hsp_list('hspList.json')
    patterns_data = load_chemic_patterns('clue867_library.xlsx')

    for hsp in hsp_list:
        hsp_abbr = hsp['hsp_abbr']
        print(f"[{elapsed()}] - 处理医院：{hsp_abbr}")

        df_items = load_item_list(engine, hsp_abbr)
        unpacked_data = unpack_chemic_items(patterns_data, df_items)
        print(f"[{elapsed()}] -  解包完成，开始使用率检查")

        usage_results = handle_chemic_usage(engine, hsp_abbr, unpacked_data)
        if usage_results:
            output(usage_results, hsp_abbr)
            print(f"[{elapsed()}] -  发现违规化验项目组，共 {len(usage_results)} 组")
        else:
            print(f"[{elapsed()}] -  无违规化验项目组")