# analyzer.py
from concurrent.futures import ThreadPoolExecutor
import json
import pandas as pd
from pathlib import Path
import os
import keyword
import re
class LogAnalyzer:
    """Rule-based log analyzer.

    Loads detection rules from a JSON file, evaluates each rule's
    pandas-query condition against a log DataFrame, and produces
    severity-ordered findings plus JSON/text reports.
    """

    # Canonical severity ranking shared by rule sorting and result sorting
    # (lower number = more severe; unknown severities sort last).
    _SEVERITY_ORDER = {'CRITICAL': 0, 'HIGH': 1, 'MEDIUM': 2, 'LOW': 3}

    def __init__(self, rules_path=None):
        """Create an analyzer, loading rules from *rules_path*.

        When *rules_path* is None, defaults to ``../model/rules.json``
        relative to this file.
        """
        if rules_path is None:
            current_dir = os.path.dirname(__file__)
            rules_path = os.path.join(current_dir, '..', 'model', 'rules.json')
        self.rules = self._load_rules(rules_path)
        self._validate_rules()

    def _load_rules(self, path: str) -> list:
        """Load rules from JSON, drop malformed entries, sort most-severe first.

        Raises:
            FileNotFoundError: if *path* does not exist.
        """
        if not Path(path).exists():
            raise FileNotFoundError(f"规则文件 {path} 不存在")
        with open(path, 'r', encoding='utf-8') as f:
            rules = json.load(f)['rules']

        valid_rules = []
        for rule in rules:
            if all(k in rule for k in ('id', 'condition', 'severity')):
                valid_rules.append(rule)
            else:
                print(f"规则 {rule.get('id', '未知')} 缺少必要字段")

        # BUG FIX: the original sorted on the raw severity string with
        # reverse=True (i.e. alphabetically), which orders 'medium' ahead of
        # 'critical'. Rank by the canonical severity table instead.
        return sorted(
            valid_rules,
            key=lambda r: self._SEVERITY_ORDER.get(str(r['severity']).upper(), 99),
        )

    def _validate_rules(self):
        """Defensive re-check that every loaded rule carries the required fields.

        Raises:
            ValueError: if any rule is missing 'id', 'condition' or 'severity'.
        """
        required_fields = ['id', 'condition', 'severity']
        for rule in self.rules:
            missing = [f for f in required_fields if f not in rule]
            if missing:
                raise ValueError(f"规则 {rule.get('id', '未知')} 缺少必要字段: {missing}")

    def _extract_required_columns(self, condition: str) -> list:
        """Extract the DataFrame column names referenced by *condition*.

        Example: "EventId == 4625 and Payload_Account_Name.str.contains('admin')"
        returns ['EventId', 'Payload_Account_Name'].
        """
        # BUG FIX: blank out quoted string literals first so values such as
        # 'admin' are not mistaken for column names (they previously caused
        # the whole rule to be skipped as "missing columns").
        cleaned = re.sub(r"'[^']*'|\"[^\"]*\"", ' ', condition)
        # Identifiers not preceded by '.' (attribute/method access such as
        # .str.contains) or '@' (pandas query local-variable references).
        # The original \b\w+\b token scan could never match a leading '@',
        # so its startswith('@') filter was dead code.
        tokens = re.findall(r'(?<![\w.@])[A-Za-z_]\w*', cleaned)
        python_keywords = set(keyword.kwlist) | {'and', 'or', 'not', 'in', 'is', 'None', 'True', 'False'}
        # De-duplicate while preserving first-seen order.
        columns = []
        for token in tokens:
            if token not in python_keywords and token not in columns:
                columns.append(token)
        return columns

    def analyze(self, df: pd.DataFrame) -> list:
        """Evaluate every rule's condition against *df* and return findings.

        Rules referencing columns absent from *df* are skipped; evaluation
        errors are reported but do not abort the remaining rules.
        """
        results = []
        available_columns = set(df.columns)

        for rule in self.rules:
            required_columns = self._extract_required_columns(rule['condition'])
            missing_columns = [col for col in required_columns if col not in available_columns]
            if missing_columns:
                # This log batch does not carry the fields the rule needs.
                continue

            try:
                # BUG FIX: do not force engine='numexpr' — numexpr cannot
                # evaluate string methods such as .str.contains() (the very
                # case documented above); let pandas pick a suitable engine.
                matched = df.query(rule['condition']).to_dict('records')

                if matched:
                    results.append({
                        'rule_id': rule['id'],
                        # Robustness: optional fields fall back to defaults
                        # instead of raising KeyError into the except below,
                        # which used to silently drop the matched rule.
                        'rule_name': rule.get('name', rule['id']),
                        'description': rule.get('description', ''),
                        'severity': str(rule['severity']).upper(),
                        'response_actions': rule.get('response', []),
                        'matched_records': matched,
                        'trigger_count': len(matched),
                    })
            except Exception as e:
                print(f"执行规则 {rule['id']} 时出错: {str(e)}")

        return self._format_results(results)

    def _format_results(self, raw_results: list) -> list:
        """Merge findings sharing a rule id and sort them by severity.

        NOTE: the previous implementation fanned this merge out over a
        ThreadPoolExecutor; the merge is pure-Python dict work (GIL-bound),
        so a single sequential pass is simpler and at least as fast, and —
        because executor.map preserved chunk order — yields identical output.
        """
        merged = {}
        for res in raw_results:
            key = res['rule_id']
            if key in merged:
                merged[key]['trigger_count'] += res['trigger_count']
                merged[key]['matched_records'].extend(res['matched_records'])
            else:
                merged[key] = res

        return sorted(
            merged.values(),
            key=lambda x: self._SEVERITY_ORDER.get(x['severity'], 99),
        )

    def generate_report(self, results: list, format: str = 'json') -> str:
        """Render *results* as a 'json' or 'text' report.

        Raises:
            ValueError: for any other *format* value.
        """
        if format == 'json':
            return json.dumps(results, indent=2, ensure_ascii=False)
        elif format == 'text':
            report = []
            for res in results:
                # Robustness: matched records may lack Computer/TimeCreated
                # (schema depends on the log source) — use placeholders
                # instead of raising KeyError mid-report.
                samples = '\n'.join(
                    f"- {r.get('Computer', '?')}@{r.get('TimeCreated', '?')}"
                    for r in res['matched_records'][:3]
                )
                report.append(
                    f"[{res['severity']}] {res['rule_name']}\n"
                    f"触发次数: {res['trigger_count']}\n"
                    f"建议响应: {', '.join(res['response_actions'])}\n"
                    "匹配记录样例:\n" + samples
                )
            return '\n\n'.join(report)
        else:
            raise ValueError("不支持的报告格式")
