"""step5_0_create_map_views

生成五个映射视图：v_setl_map, v_mdtrt_map, v_fee_map, v_dx_map, v_tx_map。
规则：
- 基于 env.xlsx 的 env_structure 工作表: 需要列 tbl_name, field_code, core, field_name, is_map
- 物理来源列固定使用 field_code，不再把 core 当作物理列；core 可被未来用作英文别名层，这里忽略。
- 输出中文字段（别名 = field_name；若 field_name 为空则用 field_code）
- 对 is_map=1 的字段，增加一个 <中文名或field_code> 字段，原字段改为 <中文名或field_code>编码：
    取得逻辑： 左外关联 MAP_DICT m (m.dic_type_code = field_code) AND m.NAT_DIC_VAL_CODE = 源列值
    若命中使用 m.NAT_DIC_VAL_NAME 否则回退源列值
- 不在源物理表中的列跳过并打印告警
- 视图 FROM 使用基础物理表 (与 core 视图相同解析逻辑)，不依赖先前创建的 v_xxx

依赖：MAP_DICT（本脚本会在需要时自动建表，并从 env.xlsx 的 env_map 工作表装载），config.create_db_engine 以及 .env 环境变量中表名映射。

执行：python STEP5干净表/step5_0_create_map_views.py
"""
from __future__ import annotations
import os
import sys
import pandas as pd
from sqlalchemy import text
from sqlalchemy.engine import Engine

ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if ROOT_DIR not in sys.path:
    sys.path.append(ROOT_DIR)
from config import create_db_engine, load_env_tbl_name

# Workbook and sheet locations: both the structure sheet and the mapping
# dictionary sheet live in the same env.xlsx workbook.
STRUCTURE_FILE = 'env.xlsx'
STRUCTURE_SHEET = 'env_structure'
MAP_FILE = 'env.xlsx'
MAP_SHEET = 'env_map'
# Logical table names; one V_<name>_MAP view is generated per entry.
LOGIC_TABLES = ['SETL', 'MDTRT', 'FEE', 'DX', 'TX']

# Physical table names resolved from .env (one per logical table).
SETL_TBL_NAME, MDTRT_TBL_NAME, FEE_TBL_NAME, DX_TBL_NAME, TX_TBL_NAME = load_env_tbl_name()

# Logical name -> physical table name lookup used by resolve_phys().
ENV_VAR_MAP = {
    'SETL': SETL_TBL_NAME,
    'MDTRT': MDTRT_TBL_NAME,
    'FEE': FEE_TBL_NAME,
    'DX': DX_TBL_NAME,
    'TX': TX_TBL_NAME,
}


def resolve_phys(logic_name: str) -> str | None:
    """Resolve a logical table name (e.g. 'SETL') to its physical table name.

    Returns None when the logical name is not in ENV_VAR_MAP (the original
    annotation claimed `str`, but `dict.get` can yield None); callers only
    pass members of LOGIC_TABLES, so in practice this returns a name.
    """
    return ENV_VAR_MAP.get(logic_name.upper())

def load_structure() -> pd.DataFrame:
    """Load and normalize the env_structure sheet.

    Validates the required columns, upper-cases table/field codes, coerces
    is_map to int (non-numeric or empty cells become 0 instead of crashing
    astype(int)), and keeps only rows belonging to LOGIC_TABLES.

    Raises:
        ValueError: if a required column is missing from the sheet.
    """
    df = pd.read_excel(STRUCTURE_FILE, sheet_name=STRUCTURE_SHEET)
    needed = ['tbl_name', 'field_code', 'core', 'field_name', 'is_map']
    for c in needed:
        if c not in df.columns:
            # Reference the actual workbook/sheet (the old message named a
            # non-existent 'env_structure.xlsx' file).
            raise ValueError(f'{STRUCTURE_FILE}[{STRUCTURE_SHEET}] 缺少列: {c}')
    df['tbl_name'] = df['tbl_name'].str.upper()
    df['field_code'] = df['field_code'].str.upper()
    df['core'] = df['core'].fillna('').astype(str).str.strip().str.upper()
    # to_numeric guards against stray non-numeric cells in is_map.
    df['is_map'] = pd.to_numeric(df['is_map'], errors='coerce').fillna(0).astype(int)
    df['field_name'] = df['field_name'].fillna('').astype(str).str.strip()
    return df[df['tbl_name'].isin(LOGIC_TABLES)].copy()


def column_exists(engine: Engine, table_name: str, column_name: str) -> bool:
    """Return True when user_tab_columns lists the (table, column) pair."""
    query = text(
        'SELECT 1 FROM user_tab_columns WHERE table_name = :t AND column_name = :c'
    )
    params = {'t': table_name.upper(), 'c': column_name.upper()}
    with engine.connect() as conn:
        return conn.execute(query, params).fetchone() is not None


def safe_alias(name: str) -> str:
    """Wrap *name* in double quotes for use as an Oracle column alias.

    Oracle quoted identifiers preserve case and allow CJK characters.
    Embedded double quotes are stripped first; an empty name yields ''.
    """
    if not name:
        return ''
    return '"{}"'.format(name.replace('"', ''))


def build_view_sql(engine: Engine, logic_table: str, df_sub: pd.DataFrame) -> tuple[str, list[str]]:
    """Build the CREATE OR REPLACE VIEW statement for one logical table.

    The original annotation said `-> str`, but the function returns a pair.

    Returns:
        (sql, missing): the view DDL, plus the configured columns that do
        not exist on the physical table (those are skipped with a warning
        printed by the caller).

    Raises:
        RuntimeError: if no configured column exists on the physical table.
    """
    phys = resolve_phys(logic_table)
    select_items: list[str] = []
    missing: list[str] = []
    joins: list[str] = []  # one JOIN per mapped field; kept simple on purpose
    join_idx = 0

    for row in df_sub.itertuples(index=False):
        source_col = row.field_code  # field_code is always the physical column
        target_alias_base = row.field_name if row.field_name else row.field_code
        if not column_exists(engine, phys, source_col):
            missing.append(source_col)
            continue
        if row.is_map != 1:
            # Plain column: just expose it under the Chinese display alias.
            select_items.append(f"t.{source_col} AS {safe_alias(target_alias_base)}")
        else:
            # Mapped column: keep the raw code under '<name>编码' ...
            select_items.append(f"t.{source_col} AS {safe_alias(target_alias_base + '编码')}")
            # ... and add a decoded '<name>' column via MAP_DICT.
            join_idx += 1
            mj_alias = f"m{join_idx}"
            # Escape single quotes so the inlined literal cannot break the SQL.
            dic_code = str(row.field_code).replace("'", "''")
            joins.append(
                f"LEFT JOIN MAP_DICT {mj_alias} ON {mj_alias}.DIC_TYPE_CODE = '{dic_code}'"
                f" AND {mj_alias}.NAT_DIC_VAL_CODE = t.{source_col}"
            )
            # Fall back to the raw value when the dictionary has no entry.
            select_items.append(
                f"COALESCE({mj_alias}.NAT_DIC_VAL_NAME, t.{source_col}) AS {safe_alias(target_alias_base)}"
            )

    if not select_items:
        raise RuntimeError(f"{logic_table} 无可用字段，检查结构配置或物理表。")

    sel = ',\n       '.join(select_items)
    join_clause = '\n'.join(joins)
    sql = f"CREATE OR REPLACE VIEW V_{logic_table.lower()}_MAP AS\nSELECT {sel}\nFROM {phys} t\n{join_clause}"
    return sql, missing


def create_views(engine: Engine, struct_df: pd.DataFrame):
    """Create or replace the V_<table>_MAP view for every logical table.

    Tables with no configured fields are skipped with a warning; missing
    physical columns reported by build_view_sql are printed (first 10).
    """
    for logic in LOGIC_TABLES:
        df_sub = struct_df[struct_df['tbl_name'] == logic]
        if df_sub.empty:
            print(f"[WARN] {logic} 在结构表中无字段，跳过。")
            continue
        sql, missing = build_view_sql(engine, logic, df_sub)
        with engine.begin() as conn:
            conn.execute(text(sql))
        # len(df_sub) replaces the old len([c for c in df_sub.field_code]);
        # each mapped field contributes one extra decoded column.
        field_count = len(df_sub) + df_sub['is_map'].sum()
        print(f"[VIEW] V_{logic.lower()}_MAP 创建完成, 字段(含映射)数={field_count}, 缺失物理列={len(missing)}")
        if missing:
            print(f"  -> 缺失列: {', '.join(missing[:10])}{' ...' if len(missing)>10 else ''}")

# ------------------------------
#  MAP_DICT 构建
# ------------------------------

def read_env_map(file_path: str = MAP_FILE) -> pd.DataFrame:
    """Read the env_map sheet and normalize its columns.

    Expected columns (case-insensitive):
      dic_type_code     dictionary type (joins to field_code)
      nat_dic_val_code  raw value
      nat_dic_val_name  mapped display value
      order_no (optional)
      src (optional)

    Extra columns are dropped; a missing required column raises ValueError.
    """
    df = pd.read_excel(file_path, sheet_name=MAP_SHEET)
    # Normalize header names to lower case.
    df.columns = [c.strip().lower() for c in df.columns]
    required = ['dic_type_code', 'nat_dic_val_code', 'nat_dic_val_name']
    for c in required:
        if c not in df.columns:
            raise ValueError(f'env_map.xlsx 缺少列: {c}')
    # Keep only the columns MAP_DICT can store.
    keep = [c for c in ['dic_type_code', 'nat_dic_val_code', 'nat_dic_val_name', 'order_no', 'src'] if c in df.columns]
    df = df[keep].copy()
    # Clean text columns; fillna('') first so empty cells become '' rather
    # than the literal string 'nan' that astype(str) would otherwise produce
    # (and which would then be loaded into MAP_DICT as data).
    for c in ['dic_type_code', 'nat_dic_val_code', 'nat_dic_val_name', 'src']:
        if c in df.columns:
            df[c] = df[c].fillna('').astype(str).str.strip()
    if 'order_no' in df.columns:
        df['order_no'] = pd.to_numeric(df['order_no'], errors='coerce')
    # Deduplicate: keep the first row per (dic_type_code, nat_dic_val_code).
    df = df.drop_duplicates(subset=['dic_type_code', 'nat_dic_val_code'], keep='first')
    print(f"[LOAD] env_map 读取完成 行数={len(df)}")
    return df

def create_map_dict_table(engine: Engine):
    """Create the MAP_DICT table and its indexes unless it already exists."""
    ddl = """
    CREATE TABLE MAP_DICT (
        DIC_TYPE_CODE      VARCHAR2(100),
        NAT_DIC_VAL_CODE   VARCHAR2(400),
        NAT_DIC_VAL_NAME   VARCHAR2(400),
        ORDER_NO           NUMBER,
        SRC                VARCHAR2(100),
        UPDATED_AT         DATE DEFAULT SYSDATE
    )
    """
    idxs = [
        "CREATE INDEX MAP_DICT_TYPE_CODE_IDX ON MAP_DICT(DIC_TYPE_CODE)",
        "CREATE INDEX MAP_DICT_TYPE_VAL_IDX ON MAP_DICT(DIC_TYPE_CODE, NAT_DIC_VAL_CODE)"
    ]
    with engine.begin() as conn:
        # Skip everything when the table already exists.
        exists = conn.execute(text("SELECT 1 FROM user_tables WHERE table_name='MAP_DICT'" )).fetchone()
        if exists:
            print('[INFO] MAP_DICT 已存在，跳过建表')
            return
        conn.execute(text(ddl))
        for i in idxs:
            try:
                conn.execute(text(i))
            except Exception as e:
                # Index creation stays best-effort (the table is still usable
                # without it), but failures are no longer silently swallowed.
                print(f'[WARN] 索引创建失败: {i} -> {e}')
    print('[DDL] MAP_DICT 创建完成')

def load_map_dict_into_db(engine: Engine, df: pd.DataFrame):
    """TRUNCATE MAP_DICT and bulk-insert the rows of *df*.

    order_no values that are NaN (produced by to_numeric coercion in
    read_env_map) are converted to None so the Oracle driver binds NULL
    instead of an invalid float('nan').
    """
    if df.empty:
        print('[WARN] env_map 为空，跳过数据装载')
        return
    # TRUNCATE + bulk insert: the dictionary is fully refreshed each run.
    with engine.begin() as conn:
        conn.execute(text('TRUNCATE TABLE MAP_DICT'))
    rows = df.to_dict('records')
    insert_sql = text("""
        INSERT INTO MAP_DICT (DIC_TYPE_CODE, NAT_DIC_VAL_CODE, NAT_DIC_VAL_NAME, ORDER_NO, SRC)
        VALUES (:DIC_TYPE_CODE, :NAT_DIC_VAL_CODE, :NAT_DIC_VAL_NAME, :ORDER_NO, :SRC)
    """
    )
    for r in rows:
        # Ensure the optional bind parameters are always present.
        r.setdefault('order_no', None)
        r.setdefault('src', None)
        # NaN is not a valid NUMBER bind value -> map it to NULL.
        if r['order_no'] is not None and pd.isna(r['order_no']):
            r['order_no'] = None
        # Upper-case so the type code matches the upper-cased field_code.
        if 'dic_type_code' in r and r['dic_type_code'] is not None:
            r['dic_type_code'] = str(r['dic_type_code']).upper()
    with engine.begin() as conn:
        conn.execute(insert_sql, [
            {
                'DIC_TYPE_CODE': r.get('dic_type_code'),
                'NAT_DIC_VAL_CODE': r.get('nat_dic_val_code'),
                'NAT_DIC_VAL_NAME': r.get('nat_dic_val_name'),
                'ORDER_NO': r.get('order_no'),
                'SRC': r.get('src')
            } for r in rows
        ])
    print(f"[LOAD] MAP_DICT 装载完成 行数={len(rows)}")


def main():
    """Entry point: refresh MAP_DICT, then (re)create all *_MAP views."""
    engine = create_db_engine()
    structure = load_structure()
    # Make sure MAP_DICT exists and holds the latest dictionary rows
    # before the views that join against it are created.
    create_map_dict_table(engine)
    try:
        load_map_dict_into_db(engine, read_env_map(MAP_FILE))
    except FileNotFoundError:
        print('[WARN] 未找到 env_map.xlsx，继续仅创建视图（映射值将退化为原值）')
    create_views(engine, structure)
    print('[DONE] 映射视图全部生成完成')


if __name__ == '__main__':
    main()
