# Inspection module - contains all data inspection functionality
from config import load_env_tbl_name
from sqlalchemy import text
import pandas as pd
import re
from typing import List, Dict, Tuple
import os
from sqlalchemy.engine import Engine
import json
from datetime import datetime

# Fetch all physical table names configured via the environment (.env).
SETL_TBL_NAME, MDTRT_TBL_NAME, FEE_TBL_NAME, DX_TBL_NAME, TX_TBL_NAME = load_env_tbl_name()

# Lazy cache for the env_structure sheet of env.xlsx; read only once, on first use.
_env_structure_df = None

def _get_env_structure() -> pd.DataFrame:
    """Return a copy of the env_structure sheet, loading env.xlsx on first use."""
    global _env_structure_df
    cached = _env_structure_df
    if cached is None:
        cached = pd.read_excel('env.xlsx',sheet_name='env_structure')
        _env_structure_df = cached
    # Hand out a copy so callers cannot corrupt the module-level cache.
    return cached.copy()

# ========== 公共工具 ==========

def get_engine() -> Engine:
    """Create and return a SQLAlchemy Engine for the target database.

    NOTE(review): `create_db_engine` was called without being defined or
    imported anywhere in this module, so this function raised NameError at
    call time. It is imported here from `config` (the module this file
    already pulls `load_env_tbl_name` from) -- confirm that is its real home.
    """
    from config import create_db_engine
    return create_db_engine()

def load_structure(df: pd.DataFrame) -> pd.DataFrame:
    """Validate and normalise the env_structure definition frame.

    Ensures all required columns are present, coerces the boolean flag
    columns to 0/1 ints, and upper-cases table and field codes.

    Fix: operate on a copy so the caller's DataFrame is never mutated
    (the original modified ``df`` in place).

    Args:
        df: raw env_structure sheet.

    Returns:
        A normalised copy of ``df``.

    Raises:
        ValueError: if any required column is missing.
    """
    required_cols = [
        'tbl_name','field_code','field_name','field_type',
        'is_primary_key','is_not_null','note','is_map','is_exhibit'
    ]
    missing = [c for c in required_cols if c not in df.columns]
    if missing:
        raise ValueError(f"env_structure缺少列: {missing}")
    df = df.copy()  # do not mutate the caller's frame
    # Normalise flag columns to 0/1 integers (blank cells become 0).
    for col in ['is_primary_key','is_not_null','is_map','is_exhibit']:
        df[col] = df[col].fillna(0).astype(int)
    # Upper-case identifiers to match Oracle's dictionary casing.
    df['tbl_name'] = df['tbl_name'].str.upper()
    df['field_code'] = df['field_code'].str.upper()
    return df

# ========== Logical table name -> physical table name mapping (from .env environment variables) ==========
_LOGIC_ENV_VAR_MAP = {
    'SETL': SETL_TBL_NAME,
    'MDTRT': MDTRT_TBL_NAME,
    'FEE': FEE_TBL_NAME,
    'DX': DX_TBL_NAME,
    'TX': TX_TBL_NAME
}

def resolve_physical_table_name(logic_name: str) -> str:
    """Translate a logical table name into its configured physical name.

    Empty/None input is returned unchanged; an unknown logical name yields
    None (the mapping has no fallback).
    """
    if not logic_name:
        return logic_name
    return _LOGIC_ENV_VAR_MAP.get(logic_name.upper())

def enrich_structure_with_physical(struct_df: pd.DataFrame) -> pd.DataFrame:
    """Return a copy of struct_df with logic_tbl / phys_tbl columns added."""
    enriched = struct_df.copy()
    enriched['logic_tbl'] = enriched['tbl_name']
    # Series.map applies the resolver element-wise, same as .apply here.
    enriched['phys_tbl'] = enriched['tbl_name'].map(resolve_physical_table_name)
    return enriched


def fetch_table_columns(engine: Engine) -> pd.DataFrame:
    """Load column metadata for every user table from the Oracle dictionary.

    Aliases are upper-cased in the SQL and the resulting frame's labels are
    normalised again, so callers can always use meta_df['TABLE_NAME'] etc.
    regardless of driver casing behaviour.
    """
    sql = """
    SELECT t.table_name AS TABLE_NAME,
           c.column_name AS COLUMN_NAME,
           c.data_type   AS DATA_TYPE,
           c.data_length AS DATA_LENGTH,
           c.char_length AS CHAR_LENGTH,
           c.data_precision AS DATA_PRECISION,
           c.data_scale AS DATA_SCALE
      FROM user_tables t
      JOIN user_tab_columns c ON t.table_name = c.table_name
    """
    meta = pd.read_sql(sql, engine)
    # Belt and braces: force every label to upper case.
    return meta.rename(columns=str.upper)


def build_primary_key_groups(struct_df: pd.DataFrame) -> Dict[str, List[str]]:
    """Group primary-key field codes by table name.

    Only rows with is_primary_key == 1 are considered; tables without any
    primary-key rows simply do not appear in the result.

    Fix: dropped the no-op `[c for c in ...tolist()]` comprehension --
    `tolist()` already yields the list.
    """
    groups = {}
    for tbl, grp in struct_df[struct_df.is_primary_key==1].groupby('tbl_name'):
        groups[tbl] = grp['field_code'].tolist()
    return groups

# ========== X-series check implementations ==========
# Convention: every check takes (engine, check_results, setl_count), matching the other modules.
# Appended record fields: inspect_id, inspect_name, sql, msg, info (list), count, ocp


def inspect_x01_table_exists(engine, check_results, setl_count):
    """X01: verify every configured table exists in the database (physical names)."""
    struct_df = enrich_structure_with_physical(load_structure(_get_env_structure()))
    meta_df = fetch_table_columns(engine)
    # Defensive: if TABLE_NAME is somehow absent from the metadata, append a
    # debug record listing the available columns instead of raising.
    if 'TABLE_NAME' not in meta_df.columns:
        check_results.append({
            'inspect_id': 'X01',
            'inspect_name': '表存在性检查(调试)',
            'sql': 'USER_TABLES 查询',
            'msg': '元数据缺少TABLE_NAME列',
            'info': [{'COLUMNS': str(list(meta_df.columns))}],
            'count': 0,
            'ocp': 0
        })
        return
    known_tables = set(meta_df['TABLE_NAME'].unique())

    pairs = struct_df[['logic_tbl', 'phys_tbl']].drop_duplicates()
    missing = [
        {'LOGIC_TABLE': row['logic_tbl'], 'PHYS_TABLE': row['phys_tbl']}
        for _, row in pairs.iterrows()
        if row['phys_tbl'] not in known_tables
    ]
    check_results.append({
        'inspect_id': 'X01',
        'inspect_name': '表存在性检查',
        'sql': 'USER_TABLES 查询',
        'msg': '缺失的表(逻辑->物理)',
        'info': missing[:50],
        'count': len(missing),
        'ocp': 0
    })
    print('============ X01：表存在性检查 FINISHED ============')


def inspect_x02_column_exists(engine, check_results, setl_count):
    """X02: verify every configured column exists in its table (physical names)."""
    struct_df = enrich_structure_with_physical(load_structure(_get_env_structure()))
    meta_df = fetch_table_columns(engine)
    # A set of (table, column) pairs keeps each membership test O(1).
    present = set(zip(meta_df.TABLE_NAME, meta_df.COLUMN_NAME))

    absent = [
        {'LOGIC_TABLE': row['logic_tbl'], 'PHYS_TABLE': row['phys_tbl'], 'COLUMN_NAME': row['field_code']}
        for _, row in struct_df.iterrows()
        if (row['phys_tbl'], row['field_code']) not in present
    ]
    check_results.append({
        'inspect_id': 'X02',
        'inspect_name': '列存在性检查',
        'sql': 'USER_TAB_COLUMNS 查询',
        'msg': '缺失的列',
        'info': absent[:100],
        'count': len(absent),
        'ocp': 0
    })
    print('============ X02：列存在性检查 FINISHED ============')


def inspect_x03_primary_key_uniqueness(engine, check_results, setl_count, sample_limit: int = 50):
    """X03: logical primary-key uniqueness check (physical table names).

    For every table with a configured primary-key column group, compares the
    total row count with the number of distinct key combinations and, when
    duplicates exist, samples up to ``sample_limit`` duplicated key groups.

    Fix: Oracle rejects multi-column ``COUNT(DISTINCT a, b)`` (ORA-00909),
    so the old statistics query failed for every composite key; distinct
    combinations are now counted through a ``SELECT DISTINCT`` subquery,
    which works for both single- and multi-column keys.
    """
    struct_df = enrich_structure_with_physical(load_structure(_get_env_structure()))
    pk_groups = {}
    for phys_tbl, grp in struct_df[struct_df.is_primary_key==1].groupby('phys_tbl'):
        pk_groups[phys_tbl] = grp['field_code'].tolist()
    rows = []
    dup_groups_all = []
    for phys_tbl, cols in pk_groups.items():
        if not cols:
            continue
        cols_expr = ','.join(cols)
        # Count distinct key combinations via a subquery (composite-key safe).
        base_stat_sql = (
            f"SELECT (SELECT COUNT(*) FROM {phys_tbl}) CNT_TOTAL, "
            f"(SELECT COUNT(*) FROM (SELECT DISTINCT {cols_expr} FROM {phys_tbl})) CNT_DIST "
            f"FROM DUAL"
        )
        try:
            df = pd.read_sql(base_stat_sql, engine)
            total, distinct = int(df.loc[0,'CNT_TOTAL']), int(df.loc[0,'CNT_DIST'])
            dup = total - distinct
            sample_groups = []
            if dup > 0:
                # FETCH FIRST requires Oracle 12c+.
                group_sql = f"SELECT {cols_expr}, COUNT(*) CNT FROM {phys_tbl} GROUP BY {cols_expr} HAVING COUNT(*)>1 FETCH FIRST {sample_limit} ROWS ONLY"
                gdf = pd.read_sql(group_sql, engine)
                sample_groups = gdf.to_dict(orient='records')
            dup_groups_all.extend([{'PHYS_TABLE': phys_tbl, **rec} for rec in sample_groups])
            rows.append({'PHYS_TABLE':phys_tbl,'PK_COLUMNS':cols_expr,'TOTAL':total,'DISTINCT':distinct,'DUPLICATE':dup})
            print(f"Checked PK Uniqueness {phys_tbl} Total={total} Distinct={distinct} Dup={dup}")
        except Exception as e:
            # Record the failure for this table rather than aborting the whole check.
            rows.append({'PHYS_TABLE':phys_tbl,'PK_COLUMNS':cols_expr,'ERROR':str(e),'TOTAL':None,'DISTINCT':None,'DUPLICATE':None})
    check_results.append({
        'inspect_id':'X03',
        'inspect_name':'主键唯一性检查',
        'sql':'COUNT / GROUP BY',
        'msg':'主键重复统计',
        'info': rows[:100],
        'sample_dup_groups': dup_groups_all[:sample_limit],
        'count': sum(r.get('DUPLICATE',0) or 0 for r in rows if isinstance(r, dict)),
        'ocp': 0
    })
    print('============ X03：主键唯一性检查 FINISHED ============')


def inspect_x04_not_null(engine, check_results, setl_count):
    """X04: NOT-NULL occupancy statistics for required columns (physical names).

    For each column flagged ``is_not_null`` in the structure sheet, one
    aggregate query per table counts total and non-null rows; the per-column
    occupancy is then persisted to timestamped JSON and XLSX files.

    Fixes:
    - The output timestamp is computed once, so the .json and .xlsx file
      names always match (two separate ``datetime.now()`` calls could
      straddle a second boundary and produce mismatched names).
    - The FINISHED banner is printed exactly once (it was printed twice when
      there were no tables to check).
    """
    struct_df = enrich_structure_with_physical(load_structure(_get_env_structure()))
    need = struct_df[struct_df.is_not_null==1]
    meta_df = fetch_table_columns(engine)
    # TABLE -> set of actual column names, used to skip columns absent from the DB.
    column_map = {}
    for rec in meta_df.itertuples():
        table_name = getattr(rec, 'TABLE_NAME', None)
        column_name = getattr(rec, 'COLUMN_NAME', None)
        if table_name and column_name:
            table_key = str(table_name).upper()
            column_key = str(column_name).upper()
            column_map.setdefault(table_key, set()).add(column_key)
    rows = []
    groups = list(need.groupby('phys_tbl'))
    total_tables = len(groups)
    if total_tables:
        with engine.connect() as connection:
            for idx, (table, grp) in enumerate(groups, start=1):
                cols = grp['field_code'].tolist()
                if not cols:
                    continue
                actual_cols = column_map.get(table.upper(), set())
                missing_cols = [col for col in cols if col not in actual_cols]
                valid_cols = [col for col in cols if col in actual_cols]
                # One aggregate query per table: a COUNT(CASE ...) per column.
                select_parts = ["COUNT(*) AS TOTAL_COUNT"]
                for col in valid_cols:
                    select_parts.append(f"COUNT(CASE WHEN {col} IS NOT NULL THEN 1 END) AS {col}_NON_NULL")
                sql = f"SELECT {', '.join(select_parts)} FROM {table}"
                print(f"Checking Not Null {idx}/{total_tables}: {table} ({len(cols)} columns)", end='')
                if missing_cols:
                    print(f" [missing skipped: {', '.join(missing_cols)}]", end='')
                result = connection.execute(text(sql))
                dataLower = result.mappings().first() or {}
                # Normalise result keys to upper case; drivers differ in casing.
                data = {k.upper(): v for k, v in dataLower.items()} if dataLower else {}
                total_count = data.get('TOTAL_COUNT', 0) or 0
                for col in valid_cols:
                    nn_key = f"{col}_NON_NULL"
                    non_null_count = data.get(nn_key, 0) or 0
                    null_count = total_count - non_null_count
                    ocp = (non_null_count / total_count * 100) if total_count > 0 else 0.0
                    rows.append({
                        'PHYS_TABLE': table,
                        'COLUMN': col,
                        'TOTAL_COUNT': total_count,
                        'NON_NULL_COUNT': non_null_count,
                        'NULL_COUNT': null_count,
                        'OCP_PERCENT': round(ocp, 2)
                    })
                for col in missing_cols:
                    rows.append({
                        'PHYS_TABLE': table,
                        'COLUMN': col,
                        'TOTAL_COUNT': total_count,
                        'NON_NULL_COUNT': None,
                        'NULL_COUNT': None,
                        'OCP_PERCENT': None,
                        'STATUS': 'COLUMN_NOT_FOUND'
                    })
                print(f" => table total={total_count}")
    # Persist the occupancy stats as standalone JSON and XLSX files.
    ts = datetime.now().strftime('%Y%m%d_%H%M%S')
    with open(f"step4_inspect_x04_not_null_{ts}.json", "w", encoding="utf-8") as f:
        json.dump(rows, f, ensure_ascii=False, indent=2)
    pd.DataFrame(rows).to_excel(f"step4_inspect_x04_not_null_{ts}.xlsx", index=False)
    print()
    print('============ X04：非空列空值检查 FINISHED ============')

# X05: column type / length comparison (expected-type parsing helper below)

def _parse_expected_type(type_str: str) -> Tuple[str, Dict[str, int]]:
    if not type_str:
        return '', {}
    ts = type_str.upper().strip()
    m = re.match(r'(\w+)\s*\(([^)]+)\)', ts)
    if m:
        base = m.group(1)
        nums = [n.strip() for n in m.group(2).split(',')]
        if base in ('VARCHAR2','CHAR'):
            try:
                return base, {'LEN': int(nums[0])}
            except Exception:
                return base, {}
        if base == 'NUMBER':
            try:
                if len(nums) == 1:
                    return base, {'PREC': int(nums[0])}
                elif len(nums) >= 2:
                    return base, {'PREC': int(nums[0]), 'SCALE': int(nums[1])}
            except Exception:
                return base, {}
        return base, {}
    else:
        return ts, {}

def inspect_x05_column_type_mismatch(engine, check_results, setl_count):
    """X05: fine-grained column type / length / precision comparison between
    the structure sheet and the database dictionary (physical table names).

    Columns missing from the database are skipped here because X02 already
    reports them. Length is compared only for VARCHAR2/CHAR, precision and
    scale only for NUMBER.
    """
    struct_df = enrich_structure_with_physical(load_structure(_get_env_structure()))
    meta_df = fetch_table_columns(engine)
    # Build (table, column) -> normalised actual-type record.
    meta_map = {}
    for r in meta_df.itertuples():
        # pd.isna guards drivers that return NaN for dictionary NULLs.
        # LEN prefers CHAR_LENGTH (character semantics) and falls back to
        # DATA_LENGTH (bytes).
        # NOTE(review): CHAR_LENGTH can be 0 for non-character columns; LEN is
        # only compared for VARCHAR2/CHAR below, where CHAR_LENGTH is meaningful.
        meta_map[(r.TABLE_NAME, r.COLUMN_NAME)] = {
            'DATA_TYPE': r.DATA_TYPE.upper() if r.DATA_TYPE and not pd.isna(r.DATA_TYPE) else None,
            'LEN': int(r.CHAR_LENGTH) if getattr(r,'CHAR_LENGTH', None) is not None and not pd.isna(r.CHAR_LENGTH) else (int(r.DATA_LENGTH) if r.DATA_LENGTH is not None and not pd.isna(r.DATA_LENGTH) else None),
            'PREC': int(r.DATA_PRECISION) if r.DATA_PRECISION is not None and not pd.isna(r.DATA_PRECISION) else None,
            'SCALE': int(r.DATA_SCALE) if r.DATA_SCALE is not None and not pd.isna(r.DATA_SCALE) else None
        }
    diffs = []
    for _, r in struct_df.iterrows():
        key = (r['phys_tbl'], r['field_code'])
        expect_raw = r['field_type'] or ''
        exp_type, exp_meta = _parse_expected_type(expect_raw)
        actual = meta_map.get(key)
        if not actual:
            continue  # missing columns are already covered by the X02 check
        mismatch = False
        detail_parts = []
        # Base type mismatch (only when both sides are known).
        if exp_type and actual['DATA_TYPE'] and exp_type != actual['DATA_TYPE']:
            mismatch = True
            detail_parts.append(f"TYPE exp={exp_type} act={actual['DATA_TYPE']}")
        # Length check for character types.
        if exp_type in ('VARCHAR2','CHAR') and 'LEN' in exp_meta:
            if actual['LEN'] is not None and exp_meta['LEN'] != actual['LEN']:
                mismatch = True
                detail_parts.append(f"LEN exp={exp_meta['LEN']} act={actual['LEN']}")
        # Precision/scale check for NUMBER; a NULL actual counts as 0.
        if exp_type == 'NUMBER':
            if 'PREC' in exp_meta and exp_meta['PREC'] != (actual['PREC'] or 0):
                mismatch = True
                detail_parts.append(f"PREC exp={exp_meta['PREC']} act={actual['PREC']}")
            if 'SCALE' in exp_meta and exp_meta['SCALE'] != (actual['SCALE'] or 0):
                mismatch = True
                detail_parts.append(f"SCALE exp={exp_meta['SCALE']} act={actual['SCALE']}")
        if mismatch:
            diffs.append({
                'PHYS_TABLE': r['phys_tbl'],
                'COLUMN': r['field_code'],
                'EXPECTED': expect_raw.upper(),
                'ACTUAL_TYPE': actual['DATA_TYPE'],
                'ACTUAL_LEN': actual['LEN'],
                'ACTUAL_PREC': actual['PREC'],
                'ACTUAL_SCALE': actual['SCALE'],
                'DETAIL': '; '.join(detail_parts)
            })
    check_results.append({
        'inspect_id':'X05',
        'inspect_name':'列类型差异检查',
        'sql':'USER_TAB_COLUMNS 类型/精度 对比',
        'msg':'类型不匹配',
        'info': diffs[:200],
        'count': len(diffs),
        'ocp': 0
    })
    print('============ X05：列类型差异检查 FINISHED ============')

def inspect_x06_extra_columns(engine, check_results, setl_count):
    """X06: extra-column check -- columns present in the database but absent
    from the structure definition (physical table names).

    Fix: ``defined_tables`` is now a set (was a list), so the per-metadata-row
    membership test is O(1) instead of O(n).
    """
    struct_df = enrich_structure_with_physical(load_structure(_get_env_structure()))
    defined_tables = set(struct_df['phys_tbl'].unique())
    defined_cols_set = set(zip(struct_df['phys_tbl'], struct_df['field_code']))
    meta_df = fetch_table_columns(engine)
    extras = []
    for r in meta_df.itertuples():
        # Upper-case attribute names match the normalised metadata columns.
        tbl = getattr(r, 'TABLE_NAME', None)
        col = getattr(r, 'COLUMN_NAME', None)
        dtype = getattr(r, 'DATA_TYPE', None)
        if tbl in defined_tables and (tbl, col) not in defined_cols_set:
            extras.append({'PHYS_TABLE': tbl, 'COLUMN': col, 'DATA_TYPE': dtype})
    check_results.append({
        'inspect_id':'X06',
        'inspect_name':'多余列检查',
        'sql':'USER_TAB_COLUMNS 反向对比',
        'msg':'未在结构定义中的列',
        'info': extras[:200],
        'count': len(extras),
        'ocp': 0
    })
    print('============ X06：多余列检查 FINISHED ============')

def persist_x_issues_to_struct_table(engine, check_results):
    """Enhancement 5: write X-series findings to the STRUCT_ISSUE_LOG table.

    Columns: ID, INSPECT_ID, TABLE_NAME, COLUMN_NAME, ISSUE_TYPE, DETAIL,
    CREATED_AT. IDs continue from the current MAX(ID) (no sequence object).

    Fixes:
    - X-series info records key their table as 'PHYS_TABLE' (see X01-X06),
      so the old lookup of only 'TABLE'/'TABLE_NAME' always produced NULL
      TABLE_NAME rows; 'PHYS_TABLE' is now checked first and excluded from
      the DETAIL string accordingly.
    - Dropped the redundant `| {'X05'}` (range(1,7) already yields X05) and
      the unused `now_sql` local.
    """
    x_ids = {f'X0{i}' for i in range(1, 7)}
    rows = []
    for r in check_results:
        if r.get('inspect_id') not in x_ids:
            continue
        inspect_id = r.get('inspect_id')
        # info is a list of per-finding dicts with varying shapes.
        info_list = r.get('info') or []
        # X03 additionally carries sampled duplicate key groups.
        if inspect_id == 'X03' and r.get('sample_dup_groups'):
            info_list = info_list + r['sample_dup_groups']
        for item in info_list:
            table_name = item.get('PHYS_TABLE') or item.get('TABLE') or item.get('TABLE_NAME')
            column_name = item.get('COLUMN') or item.get('COLUMN_NAME')
            # Everything that is not the table/column key goes into DETAIL.
            detail = []
            for k, v in item.items():
                if k.upper() in ('PHYS_TABLE', 'TABLE', 'TABLE_NAME', 'COLUMN', 'COLUMN_NAME'):
                    continue
                detail.append(f"{k}={v}")
            detail_str = '; '.join(detail)[:1800]
            rows.append((inspect_id, table_name, column_name, r.get('msg'), detail_str))
    if not rows:
        return
    with engine.connect() as conn:
        # Create the log table on first use; ORA-00955 = name already in use.
        conn.execute(text("""
            BEGIN
                EXECUTE IMMEDIATE 'CREATE TABLE STRUCT_ISSUE_LOG (
                    ID NUMBER PRIMARY KEY,
                    INSPECT_ID VARCHAR2(10),
                    TABLE_NAME VARCHAR2(100),
                    COLUMN_NAME VARCHAR2(100),
                    ISSUE_TYPE VARCHAR2(200),
                    DETAIL VARCHAR2(2000),
                    CREATED_AT DATE
                )';
            EXCEPTION WHEN OTHERS THEN
                IF SQLCODE != -955 THEN RAISE; END IF; -- 已存在
            END;"""))
        # Continue IDs from the current maximum.
        result = conn.execute(text("SELECT NVL(MAX(ID),0) FROM STRUCT_ISSUE_LOG"))
        start_id = result.scalar() or 0
        insert_sql = text("""
            INSERT INTO STRUCT_ISSUE_LOG (ID, INSPECT_ID, TABLE_NAME, COLUMN_NAME, ISSUE_TYPE, DETAIL, CREATED_AT)
            VALUES (:id, :inspect_id, :table_name, :column_name, :issue_type, :detail, SYSDATE)
        """)
        id_counter = start_id
        for rec in rows:
            id_counter += 1
            conn.execute(insert_sql, {
                'id': id_counter,
                'inspect_id': rec[0],
                'table_name': rec[1],
                'column_name': rec[2],
                'issue_type': rec[3],
                'detail': rec[4]
            })
        conn.commit()

def persist_x_issues_to_dirty_data(engine, check_results):
    """Enhancement 6: write X-series structural issues to DIRTY_DATA (SETL_ID NULL).

    Fixes:
    - X-series info records key their table as 'PHYS_TABLE', so the old
      lookup of only 'TABLE'/'TABLE_NAME' always yielded None and produced
      'None.<col>' INFO strings; 'PHYS_TABLE' is now checked first.
    - Dropped the redundant `| {'X05'}` (range(1,7) already yields X05).
    """
    x_ids = {f'X0{i}' for i in range(1, 7)}
    rows = []
    for r in check_results:
        if r.get('inspect_id') not in x_ids:
            continue
        inspect_id = r['inspect_id']
        info_items = r.get('info') or []
        # X03 additionally carries sampled duplicate key groups.
        if inspect_id == 'X03' and r.get('sample_dup_groups'):
            info_items = info_items + r['sample_dup_groups']
        for item in info_items:
            table_name = item.get('PHYS_TABLE') or item.get('TABLE') or item.get('TABLE_NAME')
            column_name = item.get('COLUMN') or item.get('COLUMN_NAME')
            # Fall back to the check-level message when no table/column is known.
            rows.append((inspect_id, r.get('inspect_name'), f"{table_name}.{column_name}" if table_name or column_name else r.get('msg')))
    if not rows:
        return
    with engine.connect() as conn:
        insert_sql = text("""
            INSERT INTO DIRTY_DATA (SETL_ID, INSPECT_ID, INSPECT_NAME, LV, INFO)
            VALUES (NULL, :inspect_id, :inspect_name, 'STRUCT', :info)
        """)
        for rec in rows:
            conn.execute(insert_sql, {
                'inspect_id': rec[0],
                'inspect_name': rec[1],
                'info': rec[2][:1900]
            })
        conn.commit()


