# feature_engineering.py
#
# Feature engineering for metro-network traffic data: loads a raw CSV,
# derives time/spatial/event/rate features in six staged passes, and
# writes both per-stage snapshots and the final feature table.

# Standard library
import ast
import logging
import os
import sys
from datetime import datetime

# Third-party
import numpy as np
import pandas as pd

# Make the project root importable so the shared `config` module resolves
# regardless of the working directory this script is launched from.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)

# NOTE(review): "FEATYRES" spelling mirrors the names declared in config.py;
# if those constants are ever renamed (e.g. to FEATURES_*), update here too.
from config import FEATYRES_INPUT, OUTPUT_FEATYRES

# -------------------------------
# Logging configuration
# -------------------------------
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# -------------------------------
# Path configuration
# -------------------------------
INPUT_PATH = FEATYRES_INPUT
OUTPUT_PATH = OUTPUT_FEATYRES

# Ensure the output directory exists before anything tries to write into it.
os.makedirs(os.path.dirname(OUTPUT_PATH), exist_ok=True)

# -------------------------------
# 核心特征工程函数
# -------------------------------
def _save_stage(df: pd.DataFrame, stage_dir: str, stage_num: int,
                filename: str, label: str) -> None:
    """Write an intermediate feature snapshot to *stage_dir* and log its path."""
    path = os.path.join(stage_dir, filename)
    df.to_csv(path, index=False)
    logger.info(f"  已保存阶段{stage_num}: {label} → {path}")


def _add_time_features(df: pd.DataFrame) -> pd.DataFrame:
    """Stage 1: calendar and time-of-day features derived from 'timestamp'."""
    df['hour_of_day'] = df['timestamp'].dt.hour
    df['day_of_week'] = df['timestamp'].dt.dayofweek  # 0=Mon, 6=Sun
    df['day_of_month'] = df['timestamp'].dt.day
    df['is_weekend'] = (df['day_of_week'] >= 5).astype(int)
    # Morning and evening commute hours.
    df['is_peak_hour'] = df['hour_of_day'].isin([7, 8, 9, 17, 18, 19]).astype(int)
    df['is_business_hours'] = ((df['day_of_week'] < 5) &
                               (df['hour_of_day'].between(9, 17))).astype(int)
    # Four coarse day-part buckets; -1 lower edge so hour 0 lands in 'night'.
    df['time_of_day_bin'] = pd.cut(
        df['hour_of_day'],
        bins=[-1, 6, 12, 18, 24],
        labels=['night', 'morning', 'afternoon', 'evening'],
        include_lowest=True
    ).astype(str)
    # NOTE(review): "holidays" are hard-coded as the 5th/20th of every month —
    # presumably placeholder dates; confirm against a real holiday calendar.
    df['is_holiday'] = df['day_of_month'].isin([5, 20]).astype(int)
    return df


def _safe_parse_regions(x):
    """Parse a stringified Python list of adjacent regions; return [] on failure."""
    try:
        return ast.literal_eval(x) if isinstance(x, str) else []
    except Exception as e:
        logger.warning(f"解析 adjacent_regions 失败: {x}, 错误: {e}")
        return []


def _add_spatial_features(df: pd.DataFrame) -> pd.DataFrame:
    """Stage 2: topology features from adjacency lists and region hierarchy."""
    df['adjacent_list'] = df['adjacent_regions'].apply(_safe_parse_regions)
    df['adjacent_count'] = df['adjacent_list'].apply(len)
    # 'CN_GZ' is treated as the single core region; everything else is edge.
    df['region_level'] = df['parent_region'].apply(lambda x: 'core' if x == 'CN_GZ' else 'edge')
    df['is_core_region'] = (df['region_level'] == 'core').astype(int)
    # Core nodes are core-region devices with at least 3 neighbours.
    df['is_core_node'] = ((df['is_core_region'] == 1) & (df['adjacent_count'] >= 3)).astype(int)
    return df


def _add_event_features(df: pd.DataFrame) -> pd.DataFrame:
    """Stage 3: fault, attack, maintenance and severity status features."""
    df['fault_impact_score'] = 0
    # Rows inside a fault window score 30 points per adjacent region.
    fault_mask = df['is_fault_period'].eq(True)  # .eq avoids `== True` (E712)
    df.loc[fault_mask, 'fault_impact_score'] = 30 * df.loc[fault_mask, 'adjacent_count']
    df['ddos_attack_flag'] = (df['fault_type'] == 'DDOS_ATTACK').astype(int)
    # Any non-null maintenance cycle marks the device as under a maintenance plan.
    df['maintenance_flag'] = df['maintenance_cycle_days'].notna().astype(int)
    # Ordinal encoding; unknown/missing severities map to 0.
    severity_map = {'LOW': 1, 'MEDIUM': 2, 'HIGH': 3, 'CRITICAL': 4}
    df['severity_level_encoded'] = df['severity_level'].map(severity_map).fillna(0).astype(int)
    return df


def _add_derived_features(df: pd.DataFrame) -> pd.DataFrame:
    """Stage 4: rate/ratio consistency checks and protocol one-hot encoding."""
    SAMPLING_INTERVAL_SEC = 300  # 5-minute sampling window
    # Expected Mbps from the byte counter: bytes -> bits, spread over the window.
    df['in_mbps_calculated'] = df['in_bytes'] * 8 / (SAMPLING_INTERVAL_SEC * 1e6)
    df['in_mbps_error_abs'] = (df['in_mbps'] - df['in_mbps_calculated']).abs()
    # +1e-8 guards against division by zero.
    df['in_mbps_error_ratio'] = df['in_mbps_error_abs'] / (df['in_mbps_calculated'] + 1e-8)

    df['in_out_ratio'] = df['in_bytes'] / (df['out_bytes'] + 1)  # +1 avoids /0
    df['total_mbps'] = df['in_mbps'] + df['out_mbps']
    df['load_ratio_vs_capacity'] = df['traffic_load_ratio'] / (df['bandwidth_capacity'] / 1e6 + 1e-8)
    # Per-device z-score of inbound rate; +1e-6 guards a zero std deviation.
    df['device_in_mbps_zscore'] = df.groupby('device_id')['in_mbps'].transform(
        lambda x: (x - x.mean()) / (x.std() + 1e-6)
    )
    # One-hot encode the protocol type and append the dummy columns.
    protocol_dummies = pd.get_dummies(df['protocol_type'], prefix='proto')
    df = pd.concat([df, protocol_dummies], axis=1)
    return df


def _add_cyclical_features(df: pd.DataFrame) -> pd.DataFrame:
    """Stage 5: cyclical sine/cosine encoding of the hour of day."""
    df['hour_sin'] = np.sin(2 * np.pi * df['hour_of_day'] / 24)
    df['hour_cos'] = np.cos(2 * np.pi * df['hour_of_day'] / 24)
    return df


def _finalize_features(df: pd.DataFrame) -> pd.DataFrame:
    """Stage 6: drop scratch columns, rename measured rates, cast bools to int."""
    cols_to_drop = ['adjacent_list', 'in_mbps_calculated']
    df.drop(columns=[col for col in cols_to_drop if col in df.columns], inplace=True)
    df.rename(columns={
        'in_mbps': 'in_mbps_measured',
        'out_mbps': 'out_mbps_measured'
    }, inplace=True)
    bool_cols = df.select_dtypes(include=['bool']).columns
    df[bool_cols] = df[bool_cols].astype(int)
    return df


def extract_features(df: pd.DataFrame) -> pd.DataFrame:
    """
    Run the full feature-engineering pipeline on metro-network traffic data.

    Six stages — time, spatial/hierarchy, event/status, derived rates,
    cyclical encodings, and cleanup — are applied in order; after each
    stage a CSV snapshot is written next to OUTPUT_PATH.

    Parameters
    ----------
    df : pd.DataFrame
        Raw traffic records; must contain 'timestamp' and 'timestamp_5min'
        columns plus the traffic/topology/event columns referenced below.

    Returns
    -------
    pd.DataFrame
        A new frame with the engineered feature columns (the input is not
        mutated — a copy is taken first).
    """
    logger.info(" 开始特征工程处理...")

    # Work on a copy to avoid SettingWithCopy warnings on the caller's frame.
    df = df.copy()

    # Normalize the time fields to datetime before any .dt access.
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    df['timestamp_5min'] = pd.to_datetime(df['timestamp_5min'])

    # Snapshots live beside the final output file.
    # (Previously a redundant single-argument os.path.join was used here.)
    stage_dir = os.path.dirname(OUTPUT_PATH)
    os.makedirs(stage_dir, exist_ok=True)

    df = _add_time_features(df)
    _save_stage(df, stage_dir, 1, 'stage_1_time_features.csv', '时间特征')

    df = _add_spatial_features(df)
    _save_stage(df, stage_dir, 2, 'stage_2_spatial_features.csv', '空间与层级特征')

    df = _add_event_features(df)
    _save_stage(df, stage_dir, 3, 'stage_3_event_features.csv', '事件与状态特征')

    df = _add_derived_features(df)
    _save_stage(df, stage_dir, 4, 'stage_4_derived_features.csv', '派生特征')

    df = _add_cyclical_features(df)
    _save_stage(df, stage_dir, 5, 'stage_5_advanced_features.csv', '高阶特征')

    df = _finalize_features(df)
    _save_stage(df, stage_dir, 6, 'stage_6_final_features.csv', '最终特征')

    logger.info(f" 特征工程完成，共生成 {len(df.columns)} 个特征字段。")
    return df

# -------------------------------
# 主函数
# -------------------------------
def main():
    """Load the raw CSV, run feature engineering, and persist the result.

    Reads from INPUT_PATH and writes the final feature table to OUTPUT_PATH.
    Errors are logged (with traceback) rather than raised, so a failure in
    either I/O step exits cleanly without a stack dump to stderr.
    """
    logger.info(f" 读取数据: {INPUT_PATH}")
    try:
        df = pd.read_csv(INPUT_PATH)
        logger.info(f" 读取成功，数据形状: {df.shape}")
    except Exception as e:
        # logger.exception also records the traceback, unlike logger.error.
        logger.exception(f" 读取失败: {e}")
        return

    # Run the six-stage feature pipeline.
    df_features = extract_features(df)

    logger.info(f" 保存特征数据到: {OUTPUT_PATH}")
    try:
        df_features.to_csv(OUTPUT_PATH, index=False, encoding='utf-8')
        logger.info(f" 特征工程完成，已保存为 CSV，共 {len(df_features)} 行，{len(df_features.columns)} 列。")

        # Preview the first few column names for quick sanity checking.
        print("\n 输出字段预览（前10个）:")
        print(df_features.columns.tolist()[:10])
        print(f"...\n共 {len(df_features.columns)} 个字段")

    except Exception as e:
        logger.exception(f" 保存 CSV 失败: {e}")

if __name__ == "__main__":
    main()