# -*- coding: utf-8 -*-
# @File    : log_analysis_service.py
# @Desc    : 日志解析服务

import csv
import os
import re
import shutil
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

from flask import current_app


class LogAnalysisService:
    """Service for analysing DVR logs for "record skip" (lost-recording) events.

    The service saves uploaded log files, scans them for the
    ``record skip ch:N`` entries emitted by ``gui_playback_timeosg_seglist_add``,
    aggregates the hits per channel, and exports a plain-text report into the
    configured result directory.
    """

    def __init__(self):
        """Resolve the upload/result directories from Flask config and create them.

        Falls back to ``uploads/logs`` / ``uploads/results`` when the config
        keys ``LOG_UPLOAD_DIR`` / ``LOG_RESULT_DIR`` are absent.
        """
        # BUG FIX: current_app.config is a dict-like object; the original
        # getattr(current_app.config, KEY, default) looked up an *attribute*
        # named KEY (never present for config keys) and therefore always
        # returned the default, silently ignoring any configured directories.
        # Use key lookup via Mapping.get() instead.
        self.upload_dir = current_app.config.get('LOG_UPLOAD_DIR', 'uploads/logs')
        self.result_dir = current_app.config.get('LOG_RESULT_DIR', 'uploads/results')

        # Ensure both directories exist (idempotent).
        os.makedirs(self.upload_dir, exist_ok=True)
        os.makedirs(self.result_dir, exist_ok=True)

    def save_uploaded_file(self, file, filename: Optional[str] = None) -> str:
        """Persist an uploaded file into the upload directory.

        Args:
            file: Flask/Werkzeug file object (must provide ``save()`` and
                ``filename``).
            filename: Target file name; when None a timestamp-based name is
                generated from the current time plus the original extension.

        Returns:
            str: Path of the saved file.
        """
        if filename is None:
            # Derive the extension from the client-supplied name; default .log.
            file_ext = os.path.splitext(file.filename)[1] if file.filename else '.log'
            # NOTE(review): second-resolution names can collide when two files
            # arrive within the same second — confirm whether that matters here.
            filename = datetime.now().strftime("%Y%m%d%H%M%S") + file_ext

        file_path = os.path.join(self.upload_dir, filename)
        file.save(file_path)
        current_app.logger.info(f"日志文件已保存: {file_path}")
        return file_path

    def timestamp_to_datetime(self, timestamp: float) -> str:
        """Convert a Unix timestamp to a human-readable local-time string.

        Args:
            timestamp: Seconds since the epoch.

        Returns:
            str: ``YYYY-MM-DD HH:MM:SS`` in local time, or an error-marker
            string when the timestamp is out of range.
        """
        try:
            return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
        # fromtimestamp may also raise OverflowError for extreme values on
        # some platforms, in addition to ValueError/OSError.
        except (ValueError, OSError, OverflowError):
            return f"时间戳错误: {timestamp}"

    def parse_skip_record(self, line: str) -> Optional[Dict[str, Any]]:
        """Parse one lost-recording ("record skip") log line.

        Expected payload shape::

            record skip ch:<channel> skip(<count>) info time <start>.<end>

        Args:
            line: Raw log line.

        Returns:
            Dict with channel, timing and duration details, or None when the
            line does not match.
        """
        # start/end are dot-separated epoch values; each may itself carry a
        # fractional part, which the greedy groups disambiguate by backtracking.
        pattern = r'record skip ch:(\d+) skip\((\d+)\) info time (\d+(?:\.\d+)?)\.(\d+(?:\.\d+)?)'
        match = re.search(pattern, line)
        if not match:
            return None

        # The log timestamp is the first bracketed token of the line.
        time_match = re.search(r'\[([^\]]+)\]', line)
        log_time = time_match.group(1) if time_match else "未知时间"

        channel = int(match.group(1))
        skip_count = int(match.group(2))
        start_timestamp = float(match.group(3))
        end_timestamp = float(match.group(4))

        return {
            'log_time': log_time,
            'channel': channel,
            # Channels are 0-based in the log but 1-based for display
            # (ch:0 = channel 1).
            'channel_display': channel + 1,
            'skip_count': skip_count,
            'start_timestamp': start_timestamp,
            'end_timestamp': end_timestamp,
            'start_time': self.timestamp_to_datetime(start_timestamp),
            'end_time': self.timestamp_to_datetime(end_timestamp),
            # Lost-recording span in seconds.
            'duration': end_timestamp - start_timestamp,
            'raw_line': line.strip()
        }

    def _read_log_lines(self, log_file_path: str) -> Tuple[List[str], str]:
        """Read the log file as lines, auto-detecting its encoding.

        Returns:
            Tuple[List[str], str]: (file lines, encoding actually used).

        Raises:
            Exception: When no candidate encoding can decode the file.
        """
        # BUG FIX: the original opened with errors='ignore', so the first
        # candidate encoding (utf-16) "succeeded" for virtually any file and
        # UTF-8 logs were silently decoded into garbage that matched no
        # records. Decode strictly instead, and only prefer utf-16 when a
        # UTF-16 BOM is actually present; latin1 cannot fail and acts as the
        # last-resort fallback.
        with open(log_file_path, 'rb') as raw:
            bom = raw.read(2)
        if bom in (b'\xff\xfe', b'\xfe\xff'):  # UTF-16 LE / BE BOM
            encodings = ['utf-16', 'utf-8', 'gbk', 'latin1']
        else:
            encodings = ['utf-8', 'gbk', 'utf-16', 'latin1']

        for encoding in encodings:
            try:
                with open(log_file_path, 'r', encoding=encoding) as f:
                    lines = f.readlines()
                current_app.logger.info(f"成功使用 {encoding} 编码读取文件")
                return lines, encoding
            except Exception as e:
                current_app.logger.warning(f"尝试 {encoding} 编码失败: {str(e)}")

        raise Exception("无法读取日志文件，尝试了所有编码方式")

    def parse_log_file(self, log_file_path: str) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        """Parse a whole log file for lost-recording records.

        Args:
            log_file_path: Path of the log file to analyse.

        Returns:
            Tuple[List[Dict], Dict]: (matched records, parsing statistics).

        Raises:
            FileNotFoundError: When the file does not exist.
            Exception: When the file cannot be decoded or parsed.
        """
        if not os.path.exists(log_file_path):
            raise FileNotFoundError(f"日志文件不存在: {log_file_path}")

        file_content, used_encoding = self._read_log_lines(log_file_path)

        records: List[Dict[str, Any]] = []
        line_count = 0
        try:
            for line in file_content:
                line_count += 1
                # Cheap substring pre-filter before running the regex parser.
                if 'gui_playback_timeosg_seglist_add' in line and 'record skip ch:' in line:
                    record = self.parse_skip_record(line)
                    if record:
                        records.append(record)
        except Exception as e:
            raise Exception(f"解析日志文件时出错: {str(e)}")

        matched_count = len(records)
        stats = {
            'total_lines': line_count,
            'matched_records': matched_count,
            'encoding_used': used_encoding,
            'file_size': os.path.getsize(log_file_path)
        }

        current_app.logger.info(f"日志文件解析完成: 总行数={line_count}, 匹配记录数={matched_count}")

        return records, stats

    def format_duration(self, duration: float) -> str:
        """Format a duration in seconds for display.

        Args:
            duration: Duration in seconds.

        Returns:
            str: Seconds below one minute, minutes below one hour, hours
            otherwise (always with the raw seconds in parentheses).
        """
        if duration < 60:
            return f"{duration:.1f}秒"
        elif duration < 3600:
            minutes = duration / 60
            return f"{minutes:.1f}分钟({duration:.1f}秒)"
        else:
            hours = duration / 3600
            return f"{hours:.1f}小时({duration:.1f}秒)"

    def generate_analysis_results(self, records: List[Dict[str, Any]],
                                  log_file_path: str, stats: Dict[str, Any]) -> Dict[str, Any]:
        """Aggregate parsed records into per-channel and overall statistics.

        Args:
            records: Lost-recording records from :meth:`parse_log_file`.
            log_file_path: Original log file path (kept for interface
                compatibility; not used in the aggregation itself).
            stats: Parsing statistics to embed in the result.

        Returns:
            Dict: Summary text, per-channel detail, and overall totals.
        """
        if not records:
            return {
                'summary': '未发现丢录像记录',
                'stats': stats,
                'channels': {},
                'total_stats': {
                    'affected_channels': 0,
                    'total_records': 0,
                    'total_duration': 0,
                    'average_duration': 0
                }
            }

        # Group records by display channel number.
        channel_stats: Dict[int, List[Dict[str, Any]]] = {}
        for record in records:
            channel_stats.setdefault(record['channel_display'], []).append(record)

        # Overall totals.
        total_records = len(records)
        total_duration = sum(r['duration'] for r in records)
        affected_channels = len(channel_stats)
        avg_duration = total_duration / total_records if total_records > 0 else 0

        # Per-channel detail blocks.
        channels_detail: Dict[int, Dict[str, Any]] = {}
        for channel, channel_records in channel_stats.items():
            channel_total_duration = sum(r['duration'] for r in channel_records)
            channels_detail[channel] = {
                'records_count': len(channel_records),
                'total_duration': channel_total_duration,
                'total_duration_formatted': self.format_duration(channel_total_duration),
                'records': channel_records
            }

        return {
            'summary': f"发现 {total_records} 条丢录像记录，涉及 {affected_channels} 个通道",
            'stats': stats,
            'channels': channels_detail,
            'total_stats': {
                'affected_channels': affected_channels,
                'total_records': total_records,
                'total_duration': total_duration,
                'total_duration_formatted': self.format_duration(total_duration),
                'average_duration': avg_duration,
                'average_duration_formatted': self.format_duration(avg_duration)
            }
        }

    def export_to_txt(self, analysis_results: Dict[str, Any],
                      original_filename: Optional[str] = None,
                      output_filename: Optional[str] = None) -> str:
        """Export the analysis report to a TXT file in the result directory.

        Args:
            analysis_results: Output of :meth:`generate_analysis_results`.
            original_filename: Name of the original log file, shown in the
                report header.
            output_filename: Output file name; when None a name of the form
                ``YYYYMMDD-HHMMSS.txt`` is generated from the current time.

        Returns:
            str: Path of the written report file.

        Raises:
            Exception: When the report cannot be written.
        """
        if not output_filename:
            output_filename = datetime.now().strftime("%Y%m%d-%H%M%S.txt")

        output_path = os.path.join(self.result_dir, output_filename)

        try:
            # Report layout intentionally mirrors log_parser.py's
            # generate_report format — do not change the strings.
            if analysis_results['total_stats']['total_records'] == 0:
                report_content = "未发现丢录像记录"
            else:
                report_lines = []
                report_lines.append("=" * 80)
                report_lines.append("丢录像记录分析报告")
                report_lines.append("=" * 80)
                report_lines.append(f"日志文件: {original_filename if original_filename else '上传文件'}")
                report_lines.append(f"发现 {analysis_results['total_stats']['total_records']} 条丢录像记录\n")

                # Per-channel detail sections, in channel order.
                channels = analysis_results['channels']
                if channels:
                    for channel_num in sorted(channels.keys()):
                        channel_info = channels[channel_num]
                        report_lines.append(f"通道{channel_num} - 共 {channel_info['records_count']} 条记录, "
                                           f"总丢录像时长: {channel_info['total_duration_formatted']}")
                        report_lines.append("-" * 60)

                        for i, record in enumerate(channel_info['records'], 1):
                            report_lines.append(f"  {i:2d}. 通道{record['channel_display']} "
                                               f"在 {record['start_time']} 到 {record['end_time']} 之间存在丢录像")
                            report_lines.append(f"      丢录像时间为 {self.format_duration(record['duration'])}")
                            report_lines.append(f"      时间戳: {record['start_timestamp']} -> {record['end_timestamp']}")
                            report_lines.append("")

                # Closing rule, kept from the original format.
                report_lines.append("=" * 80)

                report_content = "\n".join(report_lines)

            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(report_content)

            current_app.logger.info(f"分析报告已导出: {output_path}")
            return output_path

        except Exception as e:
            current_app.logger.error(f"导出报告时出错: {str(e)}")
            raise Exception(f"导出分析报告失败: {str(e)}")

    def analyze_log_file(self, log_file_path: str) -> Tuple[Dict[str, Any], str]:
        """Run the full analysis pipeline: parse, aggregate, export.

        Args:
            log_file_path: Path of the log file to analyse.

        Returns:
            Tuple[Dict, str]: (analysis results, path of the TXT report).

        Raises:
            Exception: Re-raises any failure from the underlying steps after
            logging it.
        """
        try:
            records, stats = self.parse_log_file(log_file_path)
            analysis_results = self.generate_analysis_results(records, log_file_path, stats)
            original_filename = os.path.basename(log_file_path)
            txt_path = self.export_to_txt(analysis_results, original_filename)
            return analysis_results, txt_path
        except Exception as e:
            current_app.logger.error(f"分析日志文件失败: {str(e)}")
            raise

    def delete_file(self, file_path: str) -> bool:
        """Delete a file, logging the outcome.

        Args:
            file_path: Path of the file to delete.

        Returns:
            bool: True when the file existed and was removed; False when it
            was missing or removal failed (errors are logged, not raised).
        """
        try:
            if os.path.exists(file_path):
                os.remove(file_path)
                current_app.logger.info(f"文件已删除: {file_path}")
                return True
            current_app.logger.warning(f"文件不存在，无法删除: {file_path}")
            return False
        except Exception as e:
            current_app.logger.error(f"删除文件失败: {file_path}, 错误: {str(e)}")
            return False

    def list_result_files(self) -> List[Dict[str, Any]]:
        """List all files in the result directory, newest first.

        Returns:
            List[Dict]: One entry per file with name, path, size and
            created/modified times; empty list on error (logged, not raised).
        """
        try:
            files = []
            if os.path.exists(self.result_dir):
                for filename in os.listdir(self.result_dir):
                    file_path = os.path.join(self.result_dir, filename)
                    if os.path.isfile(file_path):
                        stat = os.stat(file_path)
                        files.append({
                            'filename': filename,
                            'file_path': file_path,
                            'size': stat.st_size,
                            'created_time': datetime.fromtimestamp(stat.st_ctime).strftime('%Y-%m-%d %H:%M:%S'),
                            'modified_time': datetime.fromtimestamp(stat.st_mtime).strftime('%Y-%m-%d %H:%M:%S')
                        })

            # Newest first; the zero-padded timestamp format sorts
            # lexicographically in chronological order.
            files.sort(key=lambda x: x['modified_time'], reverse=True)
            return files

        except Exception as e:
            current_app.logger.error(f"列出结果文件失败: {str(e)}")
            return []