# clean_fault.py
import ast
import os
import sys

# Add the project root (parent of this data_cleaning directory) to sys.path
# BEFORE importing project-local modules, so that `utils` and `config`
# resolve regardless of the current working directory.
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

import pandas as pd

from config import OUT_REGION, PATH_FAULT, OUT_FAULT, ALLOWED_FAULT_TYPES, MAX_DURATION_MINUTES
from utils import load_csv, save_df, validate_numeric_range, validate_enum, validate_region_consistency, logger


# Ensure the output directory exists before any stage file is written.
os.makedirs(os.path.dirname(OUT_FAULT), exist_ok=True)

def clean_fault_event():
    """Clean fault_event.csv in six staged passes, persisting each stage.

    Stages: (1) drop rows missing ``region_id`` and fill missing
    ``fault_type``; (2) normalize dtypes (``region_id`` -> str,
    ``event_time`` -> datetime, dropping unparseable timestamps);
    (3) range-check ``duration_minutes`` and ``last_occurrence_days_ago``;
    (4) enum-check upper-cased ``fault_type`` against ALLOWED_FAULT_TYPES;
    (5) keep only rows whose ``affected_nodes`` list contains region_ids
    present in the cleaned region topology (OUT_REGION); (6) deduplicate
    on ``event_id``.  The final frame is written to OUT_FAULT.

    Returns:
        None.  Aborts early (with an error log) if the input cannot be
        loaded, the region topology is unavailable, or ``affected_nodes``
        cannot be parsed.
    """
    logger.info("开始清洗 fault_event.csv ...")

    # Load raw data; bail out early on load failure or an empty frame.
    df = load_csv(PATH_FAULT, "故障事件数据")
    if df is None or df.empty:
        logger.error("加载的故障事件数据为空或加载失败，终止清洗。")
        return

    # Stage 1: missing values — region_id is mandatory, fault_type gets a
    # sentinel that stage 4 will upper-case to 'UNKNOWN'.
    logger.info("阶段 1: 处理缺失值...")
    df.dropna(subset=['region_id'], inplace=True)
    df['fault_type'] = df['fault_type'].fillna('Unknown')
    save_df(df, OUT_FAULT.replace('.csv', '_stage1.csv'), "缺失值处理后")

    # Stage 2: dtype normalization.  errors='coerce' turns bad timestamps
    # into NaT, which the following dropna removes.
    logger.info("阶段 2: 数据类型标准化...")
    df['region_id'] = df['region_id'].astype(str)
    df['event_time'] = pd.to_datetime(df['event_time'], errors='coerce')
    df.dropna(subset=['event_time'], inplace=True)
    save_df(df, OUT_FAULT.replace('.csv', '_stage2.csv'), "类型标准化后")

    # Load the set of valid region_ids (as strings, to match stage 2's
    # normalization) for the stage-5 consistency check.
    try:
        df_region = pd.read_csv(OUT_REGION)
        valid_region_ids = set(df_region['region_id'].astype(str))
    except Exception as e:
        logger.error(f"无法加载 region_topology_clean.csv: {e}")
        return

    # Stage 3: numeric range validation.
    logger.info("阶段 3: 数值字段范围校验...")
    mask_duration = validate_numeric_range(
        df['duration_minutes'], min_val=0.1, max_val=MAX_DURATION_MINUTES, field_name='duration_minutes'
    )
    mask_days = validate_numeric_range(df['last_occurrence_days_ago'], min_val=0, field_name='last_occurrence_days_ago')
    df = df[mask_duration & mask_days]
    save_df(df, OUT_FAULT.replace('.csv', '_stage3.csv'), "数值范围校验后")

    # Stage 4: enum validation on the upper-cased fault type.
    logger.info("阶段 4: 枚举字段校验...")
    df['fault_type'] = df['fault_type'].str.upper()
    mask_ftype = validate_enum(df['fault_type'], ALLOWED_FAULT_TYPES, 'fault_type')
    df = df[mask_ftype]
    save_df(df, OUT_FAULT.replace('.csv', '_stage4.csv'), "枚举字段校验后")

    # Stage 5: affected_nodes topology consistency.
    logger.info("阶段 5: affected_nodes 拓扑一致性校验...")
    try:
        # SECURITY: the cells come from a CSV file, so parse them with
        # ast.literal_eval (literals only) instead of eval(), which would
        # execute arbitrary expressions embedded in the data.
        df['affected_nodes_list'] = df['affected_nodes'].apply(
            lambda x: ast.literal_eval(x) if isinstance(x, str) else []
        )
        # A row is valid only if the parsed value is a list of strings and
        # every entry is a known region_id.
        df['nodes_valid'] = df['affected_nodes_list'].apply(
            lambda nodes: isinstance(nodes, list) and all(isinstance(n, str) and n in valid_region_ids for n in nodes)
        )
        invalid_count = (~df['nodes_valid']).sum()
        if invalid_count > 0:
            logger.warning(f"有 {invalid_count} 条记录的 affected_nodes 包含非法 region_id，已过滤。")
        df = df[df['nodes_valid']].copy()
        # Drop the temporary helper columns before persisting.
        df.drop(columns=['affected_nodes_list', 'nodes_valid'], inplace=True, errors='ignore')
    except Exception as e:
        logger.error(f"解析 affected_nodes 失败: {e}")
        return
    save_df(df, OUT_FAULT.replace('.csv', '_stage5.csv'), "拓扑一致性校验后")

    # Stage 6: deduplicate on the event primary key.
    logger.info("阶段 6: 去重处理...")
    df.drop_duplicates(subset=['event_id'], inplace=True)
    save_df(df, OUT_FAULT.replace('.csv', '_stage6.csv'), "去重后")

    # Final output.
    save_df(df, OUT_FAULT, "清洗后的故障事件")
    logger.info(f"fault_event 清洗完成，最终保留 {len(df)} 条有效记录。")
    logger.info(f"所有中间阶段结果已保存至 {os.path.dirname(OUT_FAULT)} 目录。")


# Script entry point: run the full cleaning pipeline when executed directly.
if __name__ == "__main__":
    clean_fault_event()