# -*- coding: utf-8 -*-
# @File    : log_parser.py
# @Desc    : 解析日志文件中的丢录像记录

import os
import re
import sys
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple


class LogParser:
    """Parses "record skip" (missing-recording) entries out of an NVR log file.

    A matching log line looks like::

        [<log time>] ... gui_playback_timeosg_seglist_add ...
            record skip ch:<ch> skip(<n>) info time <start_ts>.<end_ts>

    where ``<start_ts>`` and ``<end_ts>`` are Unix timestamps in seconds.
    """

    # Compiled once at class level so they are not rebuilt for every line.
    # Matches: record skip ch:<channel> skip(<count>) info time <start>.<end>
    _SKIP_PATTERN = re.compile(
        r'record skip ch:(\d+) skip\((\d+)\) info time (\d+(?:\.\d+)?)\.(\d+(?:\.\d+)?)'
    )
    # The leading "[...]" timestamp of a log line.
    _LOG_TIME_PATTERN = re.compile(r'\[([^\]]+)\]')

    def __init__(self, log_file_path: str):
        """
        Initialize the log parser.

        Args:
            log_file_path: Path of the log file to analyze.
        """
        self.log_file_path = log_file_path
        # Kept for backward compatibility with any external code that reads
        # it; this class itself never populates it.
        self.missing_records: List[Dict[str, Any]] = []

    def timestamp_to_datetime(self, timestamp: float) -> str:
        """
        Convert a Unix timestamp to a readable local date/time string.

        Args:
            timestamp: Unix timestamp in seconds.

        Returns:
            str: 'YYYY-mm-dd HH:MM:SS', or an error-marker string when the
            timestamp cannot be represented on this platform.
        """
        try:
            return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
        # OverflowError added: fromtimestamp raises it (not only ValueError/
        # OSError) for timestamps far outside the platform's supported range.
        except (ValueError, OSError, OverflowError):
            return f"时间戳错误: {timestamp}"

    def parse_skip_record(self, line: str) -> Optional[Dict[str, Any]]:
        """
        Parse a single missing-recording log line.

        Args:
            line: Raw log line.

        Returns:
            Dict with channel, timestamps, duration etc., or None when the
            line does not contain a "record skip" entry.
        """
        match = self._SKIP_PATTERN.search(line)
        if match is None:
            return None

        # The log time comes from the leading "[...]" stamp, when present.
        time_match = self._LOG_TIME_PATTERN.search(line)
        log_time = time_match.group(1) if time_match else "未知时间"

        channel = int(match.group(1))
        skip_count = int(match.group(2))
        start_timestamp = float(match.group(3))
        end_timestamp = float(match.group(4))

        return {
            'log_time': log_time,
            'channel': channel,
            'channel_display': channel + 1,  # displayed 1-based (ch:0 -> channel 1)
            'skip_count': skip_count,
            'start_timestamp': start_timestamp,
            'end_timestamp': end_timestamp,
            'start_time': self.timestamp_to_datetime(start_timestamp),
            'end_time': self.timestamp_to_datetime(end_timestamp),
            'duration': end_timestamp - start_timestamp,  # missing span, seconds
            'raw_line': line.strip()
        }

    def parse_log_file(self) -> List[Dict[str, Any]]:
        """
        Parse the whole log file and collect every missing-recording record.

        Returns:
            List[Dict]: All parsed records (empty on any read/parse failure).
        """
        if not os.path.exists(self.log_file_path):
            print(f"日志文件不存在: {self.log_file_path}")
            return []

        # Encoding sniffing: decode strictly so that a wrong guess raises and
        # we fall through to the next candidate. The original code passed
        # errors='ignore', which made the very first candidate always
        # "succeed" — the fallback chain was dead code and non-UTF-16 files
        # were silently garbled (and matched nothing). 'utf-8-sig' also
        # accepts plain UTF-8; 'latin1' never fails, so it is the guaranteed
        # last resort. This is still heuristic, not a full charset detector.
        encodings = ['utf-8-sig', 'utf-16', 'gbk', 'latin1']
        file_content = None

        for encoding in encodings:
            try:
                with open(self.log_file_path, 'r', encoding=encoding) as f:
                    file_content = f.readlines()
                print(f" 成功使用 {encoding} 编码读取文件")
                break
            except (UnicodeError, OSError) as e:
                print(f"尝试 {encoding} 编码失败: {str(e)}")
                continue

        if file_content is None:
            print("无法读取日志文件，尝试了所有编码方式")
            return []

        records = []
        line_count = 0
        matched_count = 0

        try:
            for line in file_content:
                line_count += 1

                # Cheap substring pre-filter before running the regex.
                if 'gui_playback_timeosg_seglist_add' in line and 'record skip ch:' in line:
                    record = self.parse_skip_record(line)
                    if record:
                        records.append(record)
                        matched_count += 1

        except Exception as e:
            print(f"解析日志文件时出错: {str(e)}")
            return []

        print(f"日志文件解析完成:")
        print(f"   - 总行数: {line_count}")
        print(f"   - 匹配记录数: {matched_count}")

        return records

    def format_duration(self, duration: float) -> str:
        """
        Format a duration for display.

        Args:
            duration: Duration in seconds.

        Returns:
            str: Seconds below one minute, minutes below one hour, otherwise
            hours — always with the raw second count in parentheses.
        """
        if duration < 60:
            return f"{duration:.1f}秒"
        if duration < 3600:
            minutes = duration / 60
            return f"{minutes:.1f}分钟({duration:.1f}秒)"
        hours = duration / 3600
        return f"{hours:.1f}小时({duration:.1f}秒)"

    def generate_report(self, records: List[Dict[str, Any]]) -> str:
        """
        Build a human-readable analysis report.

        Args:
            records: List of missing-recording records.

        Returns:
            str: Formatted multi-line report text.
        """
        if not records:
            return "未发现丢录像记录"

        report = []
        report.append("=" * 80)
        report.append("丢录像记录分析报告")
        report.append("=" * 80)
        report.append(f"日志文件: {self.log_file_path}")
        report.append(f"发现 {len(records)} 条丢录像记录\n")

        # Group records by display channel number.
        channel_stats: Dict[int, List[Dict[str, Any]]] = {}
        for record in records:
            channel_stats.setdefault(record['channel_display'], []).append(record)

        # One section per channel, in ascending channel order.
        for channel in sorted(channel_stats.keys()):
            channel_records = channel_stats[channel]
            total_duration = sum(r['duration'] for r in channel_records)

            report.append(f"通道{channel} - 共 {len(channel_records)} 条记录, 总丢录像时长: {self.format_duration(total_duration)}")
            report.append("-" * 60)

            for i, record in enumerate(channel_records, 1):
                report.append(f"  {i:2d}. 通道{record['channel_display']} 在 {record['start_time']} 到 {record['end_time']} 之间存在丢录像")
                report.append(f"      丢录像时间为 {self.format_duration(record['duration'])}")
                report.append(f"      跳跃段数: {record['skip_count']}")
                report.append(f"      时间戳: {record['start_timestamp']} -> {record['end_timestamp']}")
                report.append("")

        # Overall summary.
        report.append("=" * 80)
        report.append("总体统计:")
        total_records = len(records)
        total_duration = sum(r['duration'] for r in records)

        report.append(f"   - 受影响通道数: {len(channel_stats)}")
        report.append(f"   - 总丢录像记录数: {total_records}")
        report.append(f"   - 总丢录像时长: {self.format_duration(total_duration)}")

        # records is non-empty here (the empty case returned early above),
        # so dividing by total_records is safe.
        avg_duration = total_duration / total_records
        report.append(f"   - 平均丢录像时长: {self.format_duration(avg_duration)}")

        return "\n".join(report)

    def export_to_csv(self, records: List[Dict[str, Any]], output_file: Optional[str] = None) -> Optional[str]:
        """
        Export records to a CSV file.

        Args:
            records: List of missing-recording records.
            output_file: Output path; derived from the log filename when None.

        Returns:
            str: The output path on success, or None when writing failed.
        """
        if not output_file:
            base_name = os.path.splitext(os.path.basename(self.log_file_path))[0]
            output_file = f"{base_name}_missing_records.csv"

        try:
            import csv
            # utf-8-sig: the BOM lets Excel decode the Chinese headers.
            with open(output_file, 'w', newline='', encoding='utf-8-sig') as csvfile:
                fieldnames = ['通道', '开始时间', '结束时间', '丢录像时长(秒)', '跳跃段数', '开始时间戳', '结束时间戳']
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

                writer.writeheader()
                for record in records:
                    writer.writerow({
                        '通道': record['channel_display'],
                        '开始时间': record['start_time'],
                        '结束时间': record['end_time'],
                        '丢录像时长(秒)': f"{record['duration']:.1f}",
                        '跳跃段数': record['skip_count'],
                        '开始时间戳': record['start_timestamp'],
                        '结束时间戳': record['end_timestamp']
                    })

            print(f"CSV报告已导出到: {output_file}")
            return output_file

        # Kept broad deliberately: this export is best-effort and the
        # documented contract is "return None on failure".
        except Exception as e:
            print(f" 导出CSV时出错: {str(e)}")
            return None


def main(log_file: str):
    """Entry point: parse *log_file*, print a report, optionally export CSV.

    Command-line flags (read from ``sys.argv``):
        --csv    also export the parsed records to a CSV file
        --quiet  suppress progress messages
    """
    want_csv = '--csv' in sys.argv
    quiet = '--quiet' in sys.argv

    log_parser = LogParser(log_file)

    if not quiet:
        print(" 开始解析日志文件...")
        print(f"文件路径: {log_file}")
        print()

    records = log_parser.parse_log_file()

    if not quiet:
        print()

    # Always print the textual report, even when nothing was found.
    report = log_parser.generate_report(records)
    print(report)

    # CSV export only on request and only when there is something to export.
    if records and want_csv:
        print()
        log_parser.export_to_csv(records)

    if not records:
        return

    # Persist the same report next to the current working directory.
    stem = os.path.splitext(os.path.basename(log_file))[0]
    report_file = f"{stem}_analysis_report.txt"
    try:
        with open(report_file, 'w', encoding='utf-8') as f:
            f.write(report)
        print(f"详细报告已保存到: {report_file}")
    except Exception as e:
        print(f"保存报告时出错: {str(e)}")


if __name__ == "__main__":
    # Generalized: the first non-flag command-line argument, when given,
    # selects the log file; the original hard-coded path remains the default
    # for backward compatibility. Flags (--csv/--quiet) are read by main().
    _cli_paths = [a for a in sys.argv[1:] if not a.startswith('--')]
    main(_cli_paths[0] if _cli_paths else r'E:\vepai-aut\nvr\NC2-10_2025-06-10_18_20_31.log')