import json
import re
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Dict, List, Optional

import pandas as pd
class FeatureProcessor:
    """Registry of feature-engineering processors.

    Processors are registered with :meth:`register` together with the names
    of other processors they depend on; :meth:`get_processing_order` returns
    a dependency-respecting (topologically sorted) execution order.
    """

    # feature name -> {'func': callable, 'deps': [names], 'fields': [columns]}
    _processors: Dict[str, Dict] = {}

    @classmethod
    def register(cls,
               feature_name: str,
               dependencies: Optional[List[str]] = None,
               required_fields: Optional[List[str]] = None):
        """Decorator that registers a feature processor.

        Args:
            feature_name: Identifier of the feature.
            dependencies: Names of processors that must run before this one.
            required_fields: Raw input fields the processor reads.

        Returns:
            A decorator that records the function and returns it unchanged.
        """
        def decorator(func: Callable):
            cls._processors[feature_name] = {
                'func': func,
                'deps': dependencies or [],
                'fields': required_fields or []
            }
            return func
        return decorator

    @classmethod
    def get_processing_order(cls) -> List[str]:
        """Return all registered feature names in topological order.

        Raises:
            KeyError: If a processor depends on an unregistered feature.
            ValueError: If the dependency graph contains a cycle.
        """
        order: List[str] = []
        done = set()          # nodes whose subtree is fully emitted
        in_progress = set()   # nodes on the current DFS path (cycle check)

        def visit(feature: str) -> None:
            if feature in done:
                return
            if feature in in_progress:
                # The previous implementation silently produced a wrong
                # order for cyclic graphs; fail loudly instead.
                raise ValueError(
                    f"Circular feature dependency detected at '{feature}'")
            if feature not in cls._processors:
                raise KeyError(
                    f"Feature '{feature}' is required as a dependency "
                    f"but was never registered")
            in_progress.add(feature)
            for dep in cls._processors[feature]['deps']:
                visit(dep)
            in_progress.discard(feature)
            done.add(feature)
            order.append(feature)

        for feature in list(cls._processors):
            visit(feature)
        return order

class LogPreprocessor:
    """Parses event-log lines (one JSON object per line) into a DataFrame
    and runs the registered feature processors over it in dependency order.
    """

    def __init__(self):
        # Resolve the dependency-ordered processor list once, up front.
        self.processing_order = FeatureProcessor.get_processing_order()

    def safe_split(self, value, delimiter, maxsplit=1):
        """Split ``value`` and pad the result to exactly ``maxsplit + 1`` parts.

        Unlike plain ``str.split``, the returned list always has
        ``maxsplit + 1`` elements, so unpacking never raises IndexError.
        Non-string inputs (e.g. JSON null) are coerced instead of crashing.
        """
        if not isinstance(value, str):
            value = '' if value is None else str(value)
        parts = value.split(delimiter, maxsplit)
        # Keep any partial splits already made (the previous version threw
        # them away and returned the unsplit value) and pad with ''.
        return parts + [''] * (maxsplit + 1 - len(parts))

    def _parse_log_entry(self, raw_event: Dict) -> Dict:
        """Flatten one raw event dict into a single-level field dict.

        Args:
            raw_event: Decoded JSON object for a single log line.

        Returns:
            Dict of top-level fields, promoted PayloadDataN key/values,
            parsed user domain/name/SID, and fields from the nested Payload.
        """
        parsed = {}

        # Copy top-level fields verbatim; missing ones become None.
        top_level_fields = [
            'Computer', 'EventId', 'EventRecordId', 'Channel',
            'Provider', 'TimeCreated', 'MapDescription', 'ChunkNumber'
        ]
        for field in top_level_fields:
            parsed[field] = raw_event.get(field, None)

        # Normalize the timestamp; unparsable values become NaT.
        parsed['TimeCreated'] = pd.to_datetime(parsed['TimeCreated'], errors='coerce')

        # PayloadData1-4 hold "Key: Value" strings; promote them to columns.
        payload_data_fields = ['PayloadData1', 'PayloadData2', 'PayloadData3', 'PayloadData4']
        for field in payload_data_fields:
            value = raw_event.get(field) or ''
            if not value:
                # Absent/empty field: don't emit a meaningless 'Payload_' key.
                continue
            key_part, val_part = self.safe_split(value, ': ', 1)[:2]
            clean_key = key_part.strip().replace(' ', '_')
            parsed[f"Payload_{clean_key}"] = val_part.strip()

        # UserName looks like "DOMAIN\name" or "DOMAIN\name (SID)".
        # The pattern is anchored with '$' so the lazy name group is forced
        # to consume the whole name; the previous unanchored pattern let the
        # lazy group match '' and User_Name came back empty for every row.
        user_name = raw_event.get('UserName') or ''
        user_match = re.match(r'(.*?)\\(.*?)(?:\s*\((.*?)\))?$', user_name)
        if user_match:
            parsed['User_Domain'] = user_match.group(1)
            parsed['User_Name'] = user_match.group(2).strip()
            parsed['User_Sid'] = user_match.group(3) if user_match.group(3) else None
        else:
            parsed['User_Domain'], parsed['User_Name'], parsed['User_Sid'] = None, user_name, None

        # Payload carries nested JSON: EventData.Data is a list of
        # {"@Name": ..., "#text": ...} items; a single item may arrive as a
        # bare dict depending on the XML-to-JSON converter - TODO confirm.
        try:
            payload = json.loads(raw_event.get('Payload') or '{}')
            data_items = payload.get('EventData', {}).get('Data', [])
            if isinstance(data_items, dict):
                data_items = [data_items]
            for item in data_items:
                key = item.get('@Name', 'UnknownField')
                value = item.get('#text', '')
                parsed[key] = value
        except (json.JSONDecodeError, AttributeError):
            # Malformed payloads are best-effort: skip without failing the row.
            pass

        return parsed

    def process(self, file_path: str) -> pd.DataFrame:
        """Read a JSON-lines log file, parse it in parallel, and apply all
        registered feature processors in dependency order.

        Args:
            file_path: Path to a UTF-8 (optionally BOM-prefixed) file with
                one JSON event per line.

        Returns:
            DataFrame of parsed fields plus generated feature columns;
            empty DataFrame when no line parses successfully.
        """
        with open(file_path, 'r', encoding='utf-8-sig') as f:
            # Drop blank lines up front - they would only produce JSON errors.
            log_lines = [line.strip() for line in f if line.strip()]

        # Parse entries in parallel: each line is independent.
        with ThreadPoolExecutor() as executor:
            def parse_single_line(line):
                try:
                    return self._parse_log_entry(json.loads(line))
                except json.JSONDecodeError as e:
                    # Truncate the echoed line to keep output readable.
                    print(f"JSON解析失败: {str(e)} | 内容: {line[:50]}...")
                    return None
                except Exception as e:
                    print(f"未知解析错误: {str(e)}")
                    return None

            parsed_logs = list(executor.map(parse_single_line, log_lines))

        # Discard failed rows before building the DataFrame.
        raw_df = pd.DataFrame([log for log in parsed_logs if log is not None])
        if raw_df.empty:
            # Nothing parsed: skip feature processors, which need columns.
            return raw_df

        # Feature processing stays single-threaded because processors may
        # depend on columns created by earlier processors.
        processed_df = raw_df.copy()
        for feature_name in self.processing_order:
            processor = FeatureProcessor._processors.get(feature_name)
            if not processor:
                continue

            try:
                processed_df = processor['func'](self, processed_df)
            except Exception as e:
                # One broken feature must not abort the whole pipeline.
                print(f"特征处理失败 [{feature_name}]: {str(e)}")

        return processed_df

@FeatureProcessor.register(
    feature_name="failure_count_5m",
    dependencies=[],
    required_fields=["EventId", "TimeCreated"]
)
def add_failure_count_5m(self: LogPreprocessor, df: pd.DataFrame) -> pd.DataFrame:
    """Add 'failure_count_5m': count of failed-logon events (EventId 4625)
    in the 5 minutes up to and including each row.

    Args:
        self: Owning LogPreprocessor (unused; kept for the processor protocol).
        df: Parsed log frame with 'EventId' and datetime 'TimeCreated'.

    Returns:
        The frame, sorted by time with a fresh index, plus the new column.
    """
    df = df.sort_values('TimeCreated').reset_index(drop=True)

    # Time-based rolling windows require a DatetimeIndex.
    indexed = df.set_index('TimeCreated')

    # 1 for failure rows, 0 otherwise; the rolling sum counts failures in
    # the trailing 5-minute window. '5min' replaces the deprecated '5T'
    # offset alias (removed in pandas >= 2.2).
    target_mask = (indexed['EventId'] == 4625)
    rolling_counts = target_mask.rolling('5min').sum().fillna(0).astype(int)

    # Positional assignment: both frames share the same sorted row order.
    df['failure_count_5m'] = rolling_counts.values
    return df

@FeatureProcessor.register(
    feature_name="failure_count_5m_per_user",
    dependencies=[],
    required_fields=["EventId", "TimeCreated", "TargetUserName"]
)
def add_failure_count_5m_per_user(self, df: pd.DataFrame) -> pd.DataFrame:
    """Add 'failure_count_5m_per_user': per-user count of failed logons
    (EventId 4625) in the trailing 5-minute window.

    The previous version assigned a MultiIndexed groupby/rolling result
    straight into the frame, which misaligns rows; here the group level is
    dropped and the counts re-aligned through the original row index.
    """
    df = df.sort_values('TimeCreated').reset_index(drop=True)

    helper = df[['TimeCreated', 'TargetUserName']].copy()
    helper['is_failure'] = (df['EventId'] == 4625).astype(int)

    # groupby().rolling() yields a (user, row) MultiIndex; dropping the
    # group level restores the row index so the assignment aligns.
    # '5min' replaces the deprecated '5T' offset alias.
    counts = (
        helper.groupby('TargetUserName')
              .rolling('5min', on='TimeCreated')['is_failure']
              .sum()
              .reset_index(level=0, drop=True)
    )

    # Rows with a missing user name are excluded by groupby; count them as 0.
    df['failure_count_5m_per_user'] = counts.reindex(df.index).fillna(0).astype(int)
    return df

@FeatureProcessor.register(
    feature_name="unique_user_count_5m_per_ip",
    dependencies=[],
    required_fields=["EventId", "TimeCreated", "IpAddress", "TargetUserName"] 
)
def add_unique_user_count_5m_per_ip(self, df: pd.DataFrame) -> pd.DataFrame:
    """Add 'unique_user_count_5m_per_ip': for each successful logon
    (EventId 4624), the number of distinct users seen from the same IP in
    the trailing 5-minute window; 0 for all other rows.

    The previous version grouped a boolean Series by a column label (always
    a KeyError). Rebuilt here on integer user codes because time-based
    rolling only operates on numeric columns.
    """
    df = df.sort_values('TimeCreated').reset_index(drop=True)

    logon_mask = (df['EventId'] == 4624)
    helper = df.loc[logon_mask, ['TimeCreated', 'IpAddress']].copy()
    # factorize maps each distinct user name to an integer code so the
    # rolling window can compute nunique over a numeric column.
    helper['user_code'] = pd.factorize(df.loc[logon_mask, 'TargetUserName'])[0].astype(float)

    if helper.empty:
        # No logon events at all: the feature is 0 everywhere.
        df['unique_user_count_5m_per_ip'] = 0
        return df

    counts = (
        helper.groupby('IpAddress')
              .rolling('5min', on='TimeCreated')['user_code']
              .apply(lambda window: window.nunique(), raw=False)
              .reset_index(level=0, drop=True)
    )

    # Non-logon rows (absent from `helper`) default to 0 via reindex+fillna.
    df['unique_user_count_5m_per_ip'] = counts.reindex(df.index).fillna(0).astype(int)
    return df

@FeatureProcessor.register(
    feature_name="ParentProcessName",
    dependencies=[],
    required_fields=["EventId", "Payload_CallerProcessName", "ProcessName"]
)
def add_parent_process_info(self, df: pd.DataFrame) -> pd.DataFrame:
    """Add 'ParentProcessName' from Payload_CallerProcessName, marking
    missing parents as 'N/A'.

    Per the original intent ("N/A or empty value"), empty strings are
    treated as missing in addition to NaN - the old fillna-only version
    let '' slip through.

    NOTE(review): the original computed a process-creation mask
    (EventId == 4688) but never applied it, so the column is filled for
    all rows; restricting it to 4688 rows would change output - confirm
    against downstream consumers before narrowing.
    """
    parent = df['Payload_CallerProcessName']
    # Normalize both NaN and '' to the 'N/A' sentinel.
    df['ParentProcessName'] = parent.replace('', None).fillna('N/A')
    return df