import os
import re
import logging
import pandas as pd
import chardet
from typing import List, Dict, Optional, Union, Tuple
from concurrent.futures import ThreadPoolExecutor

from dsym_manager import DSYMManager
from symbol_parser import SymbolParser

class CrashParser:
    """iOS crash-stack symbolication parser.

    Loads crash stacks from CSV or TXT files, matches each frame's binary
    name against the dSYM files found under ``dsym_path``, and symbolicates
    frames through ``SymbolParser``. Multiple stacks are parsed in parallel
    on a thread pool.
    """

    # One crash-report frame:
    # "<index> <binary> <0xaddress> [<symbol> [+ <offset>]]".
    # Compiled once at class level instead of per parsed line.
    _FRAME_PATTERN = re.compile(
        r'(\d+)\s+(\S+)\s+(0x[0-9a-fA-F]+)(?:\s+(\S+)(?:\s+\+\s+(\d+))?)?'
    )
    # Heuristic for spotting a crash-stack column: a hex address eventually
    # followed by "+ <offset>".
    _STACK_HINT_PATTERN = re.compile(r'0x[0-9a-fA-F]+.*\+\s*\d+')
    # Encodings tried, in order, when the detected/selected encoding fails.
    # Shared by parse_csv_file and parse_txt_file so the two stay in sync.
    _FALLBACK_ENCODINGS = ('utf-8-sig', 'latin1', 'iso-8859-1', 'cp1252')

    def __init__(self, dsym_path: str, arch: str = "arm64", max_workers: int = 4):
        """
        Initialize the crash-stack parser.

        Args:
            dsym_path: Path to a dSYM file, or to a directory containing
                multiple dSYM files.
            arch: Target architecture, defaults to "arm64".
            max_workers: Maximum number of worker threads used when parsing
                many stacks in parallel.
        """
        self.dsym_path = dsym_path
        self.arch = arch
        self.max_workers = max_workers
        self.logger = logging.getLogger("CrashParser")

        # Discover every dSYM once up front; per-frame parsing only matches
        # module names against this cached list.
        self.dsym_manager = DSYMManager()
        self.dsym_files = self.dsym_manager.find_dsym_files(dsym_path)

        if not self.dsym_files:
            self.logger.warning(f"在路径 {dsym_path} 中未找到任何dSYM文件")

    def detect_file_encoding(self, file_path: str) -> str:
        """
        Detect a file's text encoding from its first 4KB.

        Args:
            file_path: Path of the file to probe.

        Returns:
            The encoding reported by chardet when its confidence is at
            least 0.6; 'utf-8' for an empty file; 'utf-8-sig' when the
            guess is missing or low-confidence, or the file cannot be
            read ('utf-8-sig' also decodes plain UTF-8 without a BOM).
        """
        try:
            with open(file_path, 'rb') as f:
                # A 4KB sample keeps detection cheap even for huge dumps.
                raw_data = f.read(4096)
                if not raw_data:
                    return 'utf-8'

                result = chardet.detect(raw_data)
                encoding = result['encoding']
                confidence = result['confidence']

                self.logger.info(f"检测到文件编码: {encoding}, 置信度: {confidence}")

                # Distrust weak guesses: fall back to BOM-aware UTF-8.
                if not encoding or confidence < 0.6:
                    return 'utf-8-sig'

                return encoding
        except Exception as e:
            self.logger.warning(f"检测文件编码时出错: {str(e)}, 将使用默认编码UTF-8")
            return 'utf-8-sig'  # BOM-aware UTF-8 as the last resort

    def parse_csv_file(self, csv_file: str, stack_column: Optional[str] = None,
                      output_file: Optional[str] = None, result_column: str = "result",
                      encoding: Optional[str] = None) -> pd.DataFrame:
        """
        Parse the crash stacks contained in one column of a CSV file.

        Args:
            csv_file: Path of the CSV file.
            stack_column: Name of the column holding the crash stacks; if
                None it is auto-detected.
            output_file: Path of the output CSV; if None nothing is saved.
            result_column: Name of the column that receives the results.
            encoding: File encoding; if None it is auto-detected.

        Returns:
            The DataFrame with symbolicated results appended in
            ``result_column`` (or unchanged if the stack column cannot be
            resolved). On any other error an empty DataFrame is returned;
            errors are logged, not raised.
        """
        try:
            if not encoding:
                encoding = self.detect_file_encoding(csv_file)

            self.logger.info(f"正在读取CSV文件: {csv_file}, 编码: {encoding}")

            try:
                df = pd.read_csv(csv_file, encoding=encoding)
            except UnicodeDecodeError:
                # Detected encoding failed -- walk the common fallbacks.
                self.logger.warning(f"使用 {encoding} 编码读取失败，尝试其他编码")

                for enc in self._FALLBACK_ENCODINGS:
                    try:
                        df = pd.read_csv(csv_file, encoding=enc)
                        self.logger.info(f"成功使用编码 {enc} 读取文件")
                        break
                    except UnicodeDecodeError:
                        continue
                else:
                    self.logger.warning("所有编码尝试失败，使用替换错误策略")
                    # Fix: pd.read_csv has no `errors` keyword (that would
                    # raise TypeError); the decode-error policy is passed
                    # via `encoding_errors` (pandas >= 1.3).
                    df = pd.read_csv(csv_file, encoding='utf-8',
                                     encoding_errors='replace')

            # Resolve the stack column, auto-detecting if necessary.
            if stack_column is None:
                stack_column = self._detect_stack_column(df)
                if stack_column is None:
                    self.logger.error("无法自动检测崩溃堆栈列")
                    return df

            if stack_column not in df.columns:
                self.logger.error(f"CSV文件中不存在列: {stack_column}")
                return df

            self.logger.info(f"正在解析崩溃堆栈，列: {stack_column}")

            # Non-string cells (NaN, numbers) become None so they map to
            # the "invalid stack" marker instead of crashing the parser.
            stacks_to_parse = [
                cell if isinstance(cell, str) else None
                for cell in df[stack_column]
            ]

            parsed_results = self._parse_stacks_parallel(stacks_to_parse)

            # executor.map preserved order, so results align with rows.
            df[result_column] = parsed_results

            if output_file:
                self.logger.info(f"正在保存解析结果到: {output_file}")
                df.to_csv(output_file, index=False, encoding='utf-8')

            return df

        except Exception as e:
            self.logger.error(f"解析CSV文件时出错: {str(e)}")
            return pd.DataFrame()

    def parse_txt_file(self, txt_file: str, output_file: Optional[str] = None,
                      encoding: Optional[str] = None) -> List[str]:
        """
        Parse the crash stacks contained in a TXT file, one per line.

        Args:
            txt_file: Path of the TXT file.
            output_file: Path of the output TXT; if None nothing is saved.
            encoding: File encoding; if None it is auto-detected.

        Returns:
            One symbolicated result per non-empty input line; an empty
            list on error (the error is logged, not raised).
        """
        try:
            if not encoding:
                encoding = self.detect_file_encoding(txt_file)

            self.logger.info(f"正在读取TXT文件: {txt_file}, 编码: {encoding}")

            try:
                with open(txt_file, 'r', encoding=encoding) as f:
                    lines = f.readlines()
            except UnicodeDecodeError:
                # Detected encoding failed -- walk the common fallbacks,
                # mirroring parse_csv_file.
                self.logger.warning(f"使用 {encoding} 编码读取失败，尝试其他编码")

                for enc in self._FALLBACK_ENCODINGS:
                    try:
                        with open(txt_file, 'r', encoding=enc) as f:
                            lines = f.readlines()
                        self.logger.info(f"成功使用编码 {enc} 读取文件")
                        break
                    except UnicodeDecodeError:
                        continue
                else:
                    self.logger.warning("所有编码尝试失败，使用替换错误策略")
                    with open(txt_file, 'r', encoding='utf-8', errors='replace') as f:
                        lines = f.readlines()

            # Drop blank lines and surrounding whitespace.
            lines = [line.strip() for line in lines if line.strip()]

            self.logger.info(f"正在解析崩溃堆栈，共 {len(lines)} 行")
            parsed_results = self._parse_stacks_parallel(lines)

            if output_file:
                self.logger.info(f"正在保存解析结果到: {output_file}")
                with open(output_file, 'w', encoding='utf-8') as f:
                    for result in parsed_results:
                        f.write(f"{result}\n")

            return parsed_results

        except Exception as e:
            self.logger.error(f"解析TXT文件时出错: {str(e)}")
            return []

    def _detect_stack_column(self, df: pd.DataFrame) -> Optional[str]:
        """
        Auto-detect which column of ``df`` holds crash stacks.

        Args:
            df: DataFrame to inspect.

        Returns:
            The first column (in column order) whose leading rows contain
            a crash-frame-looking string, or None if no column matches.
        """
        # Probe at most the first 10 rows of each column.
        sample_size = min(10, len(df))

        for column in df.columns:
            # Column-first indexing instead of chained df.iloc[i][column]
            # (one Series lookup per column rather than per cell).
            for value in df[column].iloc[:sample_size]:
                if isinstance(value, str) and self._STACK_HINT_PATTERN.search(value):
                    self.logger.info(f"自动检测到崩溃堆栈列: {column}")
                    return column

        return None

    def _parse_stack(self, stack: Optional[str]) -> str:
        """
        Symbolicate a single (possibly multi-line) crash stack.

        Args:
            stack: The raw crash-stack string, or None.

        Returns:
            The symbolicated text, one output line per input frame; an
            "invalid stack" marker for None/blank/non-string input; a
            "解析失败" message if parsing raises.
        """
        if stack is None or not isinstance(stack, str) or not stack.strip():
            return "无效的崩溃堆栈"

        try:
            # Parse each non-empty frame line and rejoin in order.
            parsed_lines = [
                self._parse_stack_line(line.strip())
                for line in stack.strip().split('\n')
                if line.strip()
            ]
            return '\n'.join(parsed_lines)

        except Exception as e:
            self.logger.error(f"解析崩溃堆栈时出错: {str(e)}")
            return f"解析失败: {str(e)}"

    def _parse_stack_line(self, line: str) -> str:
        """
        Symbolicate one crash-stack frame.

        Args:
            line: A single crash-frame line.

        Returns:
            The text produced by ``SymbolParser``, or a "解析失败" message
            when the line does not look like a frame, no matching dSYM
            exists, or symbolication raises.
        """
        match = self._FRAME_PATTERN.match(line.strip())

        if not match:
            return f"解析失败: 格式不匹配 - {line}"

        index, module_name, address, image_name, offset = match.groups()

        # Locate the dSYM whose binary matches this frame's module name.
        dsym_file = self.dsym_manager.find_matching_dsym(module_name, self.dsym_files)
        if not dsym_file:
            return f"解析失败: 未找到对应的dSYM文件 - {module_name}"

        try:
            # NOTE(review): a SymbolParser is constructed per frame; if
            # profiling shows this is hot, consider caching one per dSYM
            # (verify SymbolParser is reusable/thread-safe first).
            parser = SymbolParser(dsym_file, self.arch)
            return parser.parse_stack_line(line)

        except Exception as e:
            self.logger.error(f"解析崩溃堆栈行时出错: {str(e)}")
            return f"解析失败: {str(e)} - {line}"

    def _parse_stacks_parallel(self, stacks: List[Optional[str]]) -> List[str]:
        """
        Symbolicate many crash stacks on a thread pool.

        Args:
            stacks: Raw crash stacks; None entries are allowed and map to
                the "invalid stack" marker.

        Returns:
            Results of ``_parse_stack`` in the same order as ``stacks``
            (executor.map preserves input order).
        """
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            return list(executor.map(self._parse_stack, stacks))


# Usage example
if __name__ == "__main__":
    # Configure logging
    logging.basicConfig(level=logging.INFO)
    
    # Example inputs -- adjust these paths before running
    dsym_dir = "/Users/user/Desktop/dSYMs"
    csv_file = "/Users/user/Desktop/crashes.csv"
    
    parser = CrashParser(dsym_dir)
    
    # Parse the CSV file
    results = parser.parse_csv_file(
        csv_file=csv_file,
        stack_column="crash_stack",  # auto-detected when not specified
        output_file="/Users/user/Desktop/crashes_parsed.csv"
    )
    
    print(f"解析完成，共 {len(results)} 行") 