import os
import re
import json
import tarfile
import tempfile
import shutil
from datetime import datetime, timedelta # timedelta import restored (kept for reboot-interval arithmetic)
from collections import defaultdict, Counter
from typing import Dict, List, Any, Optional
import logging
from .utils.reboot_analyzer import analyze_reboot_times_and_assign_stages # import the reboot-analysis utility

logger = logging.getLogger(__name__)

class LogProcessor:
    """Log processor - parses, categorizes and analyzes device log files.

    Parsed entries accumulate in ``self.logs`` (one dict per matched line,
    see :meth:`parse_log_line`); every ``analyze_*`` / ``categorize_*``
    method operates on that list.
    """

    def __init__(self):
        # Expected line shape:
        #   YYYY-MM-DD HH:MM:SS.fff COMPONENT LEVEL:file_info(line_no): message
        self.log_pattern = re.compile(
            r'(\d{4}-\d{2}-\d{2})\s+(\d{2}:\d{2}:\d{2}\.\d+)\s+(\w+)\s+(\w+):([^\(]+)\((\d+)\):\s*(.+)'
        )
        self.logs: List[Dict] = []
        self.analysis_results: Dict[str, Any] = {}

        # Component -> category mapping loaded from the JSON config file.
        self.component_categories = self._load_component_categories()

        # Precomputed lowercase component -> category table.  The previous
        # code rescanned (and re-lowercased) every category list for every
        # log entry, i.e. O(logs x categories x components).  setdefault
        # preserves the old first-match-wins semantics when a component
        # appears in more than one category.
        self._component_to_category: Dict[str, str] = {}
        for cat_name, components_in_cat in self.component_categories.items():
            for comp in components_in_cat:
                self._component_to_category.setdefault(comp.lower(), cat_name)

        # Directory that receives files extracted from tar.gz archives.
        os.makedirs('temp', exist_ok=True)

    def _load_component_categories(self) -> Dict[str, List[str]]:
        """Load the component-category mapping from the config file.

        Returns an empty dict on any failure so categorization degrades to
        the 'others' bucket instead of raising during construction.
        """
        config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'component_categories.json')
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except FileNotFoundError:
            logger.error(f"组件分类配置文件未找到: {config_path}")
            return {}
        except json.JSONDecodeError:
            logger.error(f"组件分类配置文件格式错误: {config_path}")
            return {}
        except Exception as e:
            logger.error(f"加载组件分类配置文件时出错: {config_path}, 错误: {e}")
            return {}

    def parse_log_line(self, line: str) -> Optional[Dict]:
        """Parse one log line; return an entry dict, or None when the line
        does not match the expected format."""
        match = self.log_pattern.match(line.strip())
        if not match:
            # Truncate to 100 chars to keep debug output readable.
            logger.debug(f"未匹配日志行格式: {line.strip()[:100]}...")
            return None
        date, time, component, level, file_info, line_num, message = match.groups()
        log_entry = {
            'date': date,
            'time': time,
            'datetime': f"{date} {time}",
            'component': component,
            'level': level,
            'file_info': file_info.strip(),
            'line_num': int(line_num),
            'message': message.strip(),
            'full_message': line.strip()
        }
        logger.debug(f"成功解析日志行: {log_entry['datetime']} {log_entry['component']} {log_entry['level']}")
        return log_entry

    def extract_logs_from_file(self, file_path: str):
        """Append all parseable log entries from *file_path* to self.logs."""
        initial_log_count = len(self.logs)
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                for line_num, line in enumerate(f, 1):
                    log_entry = self.parse_log_line(line)
                    if log_entry:
                        # Record provenance for duplicate/occurrence reports.
                        log_entry['source_file'] = os.path.basename(file_path)
                        log_entry['file_line'] = line_num
                        self.logs.append(log_entry)
            logger.info(f"从文件 {file_path} 提取了 {len(self.logs) - initial_log_count} 条日志")
        except Exception as e:
            # Best effort: one unreadable file must not abort the whole run.
            logger.error(f"读取文件 {file_path} 时出错: {e}")

    def extract_from_tar_gz(self, tar_gz_path: str):
        """Extract and parse every LogDump/*.log member of a tar.gz archive."""
        try:
            with tarfile.open(tar_gz_path, 'r:gz') as tar:
                for member in tar.getmembers():
                    # Only .log files under the LogDump directory are logs.
                    if 'LogDump' not in member.name or not member.name.endswith('.log'):
                        continue
                    source = tar.extractfile(member)
                    if source is None:
                        # extractfile() returns None for directories/links.
                        continue
                    # basename() also guards against path traversal in
                    # hostile member names.
                    temp_file = os.path.join('temp', os.path.basename(member.name))
                    with source, open(temp_file, 'wb') as target:
                        shutil.copyfileobj(source, target)
                    self.extract_logs_from_file(temp_file)
                    # Temp files are intentionally kept for later inspection.
        except Exception as e:
            logger.error(f"处理tar.gz文件时出错: {e}")

    def load_from_sample_logs(self):
        """Load example logs from the sample_logs directory, if present."""
        sample_dir = 'sample_logs'
        if os.path.exists(sample_dir):
            for filename in os.listdir(sample_dir):
                if filename.endswith('.log'):
                    self.extract_logs_from_file(os.path.join(sample_dir, filename))

    def categorize_components(self) -> Dict:
        """Aggregate per-component counts, level histograms and categories."""
        component_stats = defaultdict(lambda: {
            'count': 0,
            'levels': Counter(),
            'category': 'unknown'
        })

        for log in self.logs:
            component = log['component'].strip().lower()  # normalized key
            stats = component_stats[component]
            stats['count'] += 1
            stats['levels'][log['level']] += 1
            # O(1) table lookup; unmatched components fall back to 'others'
            # exactly like the previous linear scan did.
            stats['category'] = self._component_to_category.get(component, 'others')

        return dict(component_stats)

    def find_duplicate_logs(self) -> Dict:
        """Group identical messages emitted from the same source location."""
        duplicate_groups = defaultdict(list)

        for log in self.logs:
            # Same file, same line, same message text => same logical event.
            key = (log['file_info'], log['line_num'], log['message'])
            duplicate_groups[key].append(log)

        duplicates = {}
        for (file_info, line_num, message), logs in duplicate_groups.items():
            if len(logs) > 1:
                duplicates[f"{file_info}({line_num})"] = {
                    'message': message,
                    'count': len(logs),
                    'occurrences': [
                        {
                            'datetime': log['datetime'],
                            'component': log['component'],
                            'level': log['level'],
                            'source_file': log['source_file']
                        } for log in logs
                    ]
                }

        return duplicates

    def analyze_time_patterns(self) -> Dict:
        """Histogram the loaded logs by hour, by day and by level."""
        time_stats = {
            'hourly_distribution': Counter(),
            'daily_distribution': Counter(),
            'level_distribution': Counter()
        }

        for log in self.logs:
            try:
                dt = datetime.strptime(log['datetime'], '%Y-%m-%d %H:%M:%S.%f')
            except ValueError:
                # Skip entries whose timestamp doesn't parse.  (This was a
                # bare `except: pass`, which also hid unrelated errors.)
                continue
            time_stats['hourly_distribution'][dt.hour] += 1
            time_stats['daily_distribution'][dt.strftime('%Y-%m-%d')] += 1
            time_stats['level_distribution'][log['level']] += 1

        return time_stats

    def analyze_reboot_times(self) -> Dict[str, Any]:
        """Analyze reboot times in the loaded logs.

        Delegates the actual work to the external
        ``analyze_reboot_times_and_assign_stages`` utility.

        NOTE(review): ``self.logs`` is passed directly — the previous
        comment claimed a copy was passed, which was never true.  If the
        helper mutates the list or its entry dicts, those mutations are
        visible here; confirm against the helper before relying on
        ``self.logs`` staying untouched.
        """
        logger.info("调用重启分析工具函数...")
        return analyze_reboot_times_and_assign_stages(self.logs)

    def _apply_rolling_average(self, data: List[int], window_size: int) -> List[float]:
        """Smooth *data* with a centered rolling average of *window_size*.

        Windows are truncated at both ends of the series, so the output has
        the same length as the input.
        """
        if not data:
            return []

        half = window_size // 2
        n = len(data)
        smoothed: List[float] = []
        for i in range(n):
            window = data[max(0, i - half):min(n, i + half + 1)]
            smoothed.append(sum(window) / len(window))
        return smoothed

    def analyze_critical_time_nodes(self, reboot_analysis: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """
        Analyze critical time nodes (platform reboots and error events).

        Purpose: chart data for visualizing reboots and errors.
        X axis: real timestamps (1970-epoch artifacts already corrected by
        the reboot analysis).  Y axis: per-subsystem ERROR counts,
        aggregated into 20-minute buckets and smoothed with a rolling
        average (window 5) to avoid spiky curves.  Series are keyed by
        component category (subsystem).

        :param reboot_analysis: optional pre-computed result of
            :meth:`analyze_reboot_times`; when None it is computed here.
            (Previously it was always recomputed, doubling the work done
            by :meth:`generate_summary`.)
        """
        logger.info(f"开始分析关键时间节点。已加载的组件类别: {self.component_categories.keys()}")
        if reboot_analysis is None:
            reboot_analysis = self.analyze_reboot_times()
        fixed_logs = reboot_analysis['fixed_logs']

        # {iso-timestamp: {category: error_count}}, bucketed by 20 minutes.
        time_grouped_errors = defaultdict(lambda: defaultdict(int))
        all_timestamps = set()

        for log in fixed_logs:
            if log['level'].upper() != 'ERROR':
                continue
            try:
                # Use the corrected ("fixed") timestamp.
                log_dt = datetime.strptime(log['datetime'], '%Y-%m-%d %H:%M:%S.%f')
            except ValueError:
                logger.warning(f"无法解析日志时间: {log['datetime']}")
                continue
            # Floor to the containing 20-minute bucket.
            aggregated_dt = log_dt.replace(minute=(log_dt.minute // 20) * 20,
                                           second=0, microsecond=0)
            timestamp_key = aggregated_dt.isoformat()

            # Map the component to its subsystem via the precomputed table
            # (same first-match semantics as the old per-log scan).
            component_name = log['component'].strip().lower()
            category = self._component_to_category.get(component_name, 'others')

            time_grouped_errors[timestamp_key][category] += 1
            all_timestamps.add(aggregated_dt)
            logger.debug(f"处理错误日志: {log['datetime']}, 组件: {component_name}, 映射到类别: {category}")

        sorted_timestamps = sorted(all_timestamps)

        # Chart payload:
        # {'timestamps': [...], 'series': {category: [smoothed counts]}}
        chart_data: Dict[str, Any] = {
            'timestamps': [dt.strftime('%Y-%m-%d %H:%M') for dt in sorted_timestamps],
            'series': {}
        }

        # Every category that produced at least one error bucket.
        all_categories = sorted(set(
            cat for time_data in time_grouped_errors.values() for cat in time_data
        ))
        logger.info(f"聚合后的所有类别: {all_categories}")

        # Build each series over the full timestamp axis, then smooth it.
        for category in all_categories:
            raw_counts = [time_grouped_errors[dt.isoformat()].get(category, 0)
                          for dt in sorted_timestamps]
            smoothed_data = self._apply_rolling_average(raw_counts, 5)
            chart_data['series'][category] = smoothed_data
            logger.debug(f"类别 {category} 的平滑数据点数量: {len(smoothed_data)}")

        logger.info(f"关键时间节点分析完成。图表系列键: {chart_data['series'].keys()}")
        return chart_data

    def generate_summary(self) -> Dict:
        """Build the full analysis summary for the loaded logs.

        Returns an empty dict when no logs are loaded.
        """
        if not self.logs:
            return {}

        total_logs = len(self.logs)
        components = self.categorize_components()
        duplicates = self.find_duplicate_logs()
        time_patterns = self.analyze_time_patterns()
        reboot_analysis = self.analyze_reboot_times()
        # Reuse the reboot analysis instead of recomputing it inside
        # analyze_critical_time_nodes (previously it ran twice per summary).
        critical_time_nodes = self.analyze_critical_time_nodes(reboot_analysis)

        duplicate_count = sum(dup['count'] for dup in duplicates.values())
        duplicate_ratio = (duplicate_count / total_logs) * 100 if total_logs > 0 else 0

        # Top five components by log volume.
        most_active_components = sorted(
            components.items(),
            key=lambda x: x[1]['count'],
            reverse=True
        )[:5]

        summary = {
            'total_logs': total_logs,
            'unique_components': len(components),
            'duplicate_ratio': round(duplicate_ratio, 2),
            'duplicate_count': duplicate_count,
            'most_active_components': most_active_components,
            'time_patterns': time_patterns,
            'components': components,
            'duplicates': duplicates,
            'reboot_analysis': reboot_analysis,
            'critical_time_nodes': critical_time_nodes
        }

        logger.info(f"生成分析摘要: {total_logs} 条日志, {len(components)} 个组件, {len(duplicates)} 组重复, {len(reboot_analysis['reboot_events'])} 次重启")

        return summary

    def clear_logs(self):
        """Reset all loaded logs and cached analysis results."""
        self.logs = []
        self.analysis_results = {}