#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
高效替换引擎
使用多种优化策略执行大规模符号替换
"""

import os
import re
import mmap
import time
from typing import Dict, List, Tuple, Optional, Any, Set, Pattern
from dataclasses import dataclass, field
from collections import defaultdict
from enum import Enum
from pathlib import Path

from ..utils_module.logger import ObfuscationLogger
from ..utils_module.language_utils import detect_language_for_file, get_extension_group
from ..obfuscation_module.thread_safe_manager import ThreadSafeSymbolManager
from .context_analyzer import ContextAnalyzer
from .ast_rewriter import ASTRewriterFactory
from .lsp_ast_coordinator import LSPASTCoordinator


class ReplacementStrategy(Enum):
    """Replacement strategies supported by the engine."""
    REGEX = "regex"                 # plain regular-expression substitution
    TOKEN_BASED = "token_based"     # lexer/token-driven replacement
    AST_BASED = "ast_based"         # AST-based rewriting (most precise)
    HYBRID = "hybrid"               # auto-select per file/language


@dataclass
class ReplacementPattern:
    """Describes how a single symbol should be matched for replacement."""
    pattern: str
    regex: Optional[Pattern] = None
    word_boundary: bool = True      # require word boundaries around the match
    case_sensitive: bool = True     # match case-sensitively
    context_aware: bool = False     # consult context analysis before replacing
    exclude_patterns: List[str] = field(default_factory=list)  # patterns that veto a match


@dataclass
class ReplacementTask:
    """A single file-level replacement job (one symbol in one file)."""
    task_id: str
    file_path: str
    original_symbol: str
    new_symbol: str
    pattern: ReplacementPattern
    occurrences: int = 0  # matches found
    replaced: int = 0     # matches actually replaced
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class ReplacementResult:
    """Outcome of replacing symbols in one file."""
    file_path: str
    total_replacements: int
    replacements: Dict[str, int]  # symbol -> count
    errors: List[str] = field(default_factory=list)
    time_taken: float = 0.0  # wall-clock seconds spent on this file


class ReplacementEngine:
    """High-throughput symbol replacement engine.

    Combines several replacement strategies (regex, token-based, AST-based,
    hybrid) with context analysis so symbols can be renamed across source
    files without corrupting string literals, comments or import statements.
    """

    def __init__(self, symbol_manager: ThreadSafeSymbolManager = None,
                 logger: ObfuscationLogger = None,
                 use_mmap: bool = True):
        """
        Initialize the replacement engine.

        Args:
            symbol_manager: symbol manager (a fresh one is created if None)
            logger: optional logger instance
            use_mmap: use memory-mapped reads for large files
        """
        self.symbol_manager = symbol_manager or ThreadSafeSymbolManager()
        self.logger = logger
        self.use_mmap = use_mmap

        # Caches for compiled regexes and computed replacements.
        self.pattern_cache: Dict[str, Pattern] = {}
        self.replacement_cache: Dict[str, str] = {}

        # Language-specific pattern-building rules.
        self.language_rules = {
            'swift': SwiftReplacementRules(),
            'objc': ObjCReplacementRules(),
            'kotlin': KotlinReplacementRules(),
            'java': JavaReplacementRules()
        }

        # Context analyzers (avoid replacing inside strings and comments).
        self.context_analyzers = {
            'swift': ContextAnalyzer('swift'),
            'objc': ContextAnalyzer('objc'),
            'kotlin': ContextAnalyzer('kotlin'),
            'java': ContextAnalyzer('java'),
        }

        # LSP/AST coordinator decides when AST rewriting is worthwhile.
        self.coordinator = LSPASTCoordinator(logger)

        # Running statistics across all processed files.
        self.stats = {
            'total_files': 0,
            'total_replacements': 0,
            'total_time': 0.0,
            'errors': 0,
            'context_checks': 0,
            'safe_replacements': 0,
            'blocked_replacements': 0
        }

        # Default strategy.
        self.current_strategy = ReplacementStrategy.HYBRID

        if logger:
            logger.log_operation("替换引擎", f"初始化完成: mmap={use_mmap}, context_aware=True, coordinator=enabled")

    def set_strategy(self, strategy: ReplacementStrategy):
        """
        Set the replacement strategy.

        Args:
            strategy: the strategy to use for subsequent replacements
        """
        self.current_strategy = strategy
        if self.logger:
            self.logger.log_operation("替换引擎", f"策略设置为: {strategy.value}")

    def replace_in_file(
        self,
        file_path: str,
        replacements: Dict[str, str],
        strategy: ReplacementStrategy = ReplacementStrategy.HYBRID,
        dry_run: bool = False
    ) -> ReplacementResult:
        """
        Perform replacements in a single file.

        Args:
            file_path: path to the file
            replacements: mapping {original symbol: new symbol}
            strategy: replacement strategy
            dry_run: when True, compute counts but do not write the file

        Returns:
            The replacement result (counts, errors, timing).
        """
        start_time = time.time()
        result = ReplacementResult(file_path=file_path, total_replacements=0, replacements={})

        if not os.path.exists(file_path):
            result.errors.append(f"文件不存在: {file_path}")
            return result

        try:
            # Detect the file's language to pick rules/analyzers.
            language = self._detect_language(file_path)

            # Choose the replacement strategy.
            if strategy == ReplacementStrategy.HYBRID:
                # Hybrid: pick automatically based on the language.
                if language in ['swift', 'kotlin', 'java']:
                    content = self._ast_based_replace(file_path, replacements, language)
                else:
                    content = self._regex_based_replace(file_path, replacements, language)
            elif strategy == ReplacementStrategy.AST_BASED:
                content = self._ast_based_replace(file_path, replacements, language)
            elif strategy == ReplacementStrategy.TOKEN_BASED:
                content = self._token_based_replace(file_path, replacements, language)
            else:  # REGEX
                content = self._regex_based_replace(file_path, replacements, language)

            # Write the result back (unless this is a dry run).
            if content is not None:
                if not dry_run:
                    self._write_file(file_path, content)

                # Tally per-symbol replacement counts.
                for symbol, count in self._count_replacements(content, replacements).items():
                    result.replacements[symbol] = count
                    result.total_replacements += count

            result.time_taken = time.time() - start_time
            self._update_stats(result)

            if self.logger:
                self.logger.log_operation(
                    "文件替换",
                    f"{file_path}: {result.total_replacements} 处替换, "
                    f"耗时 {result.time_taken:.3f}秒"
                )

        except Exception as e:
            result.errors.append(str(e))
            self.stats['errors'] += 1
            if self.logger:
                self.logger.log_error(f"替换失败 {file_path}: {e}")

        return result

    def batch_replace(self, tasks: List[ReplacementTask],
                     parallel: bool = True) -> Dict[str, ReplacementResult]:
        """
        Execute a batch of replacement tasks.

        Args:
            tasks: list of replacement tasks
            parallel: process tasks concurrently with a thread pool

        Returns:
            Mapping {file path: replacement result}.
        """
        results = {}

        if parallel:
            import concurrent.futures
            with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
                future_to_task = {
                    executor.submit(self._process_task, task): task
                    for task in tasks
                }

                for future in concurrent.futures.as_completed(future_to_task):
                    task = future_to_task[future]
                    try:
                        result = future.result()
                        results[task.file_path] = result
                    except Exception as e:
                        if self.logger:
                            self.logger.log_error(f"任务失败 {task.task_id}: {e}")
        else:
            for task in tasks:
                result = self._process_task(task)
                results[task.file_path] = result

        return results

    def _process_task(self, task: ReplacementTask) -> ReplacementResult:
        """
        Process a single replacement task.

        Args:
            task: the task to run

        Returns:
            The replacement result for the task's file.
        """
        replacements = {task.original_symbol: task.new_symbol}
        return self.replace_in_file(task.file_path, replacements)

    def _regex_based_replace(self, file_path: str, replacements: Dict[str, str],
                           language: str) -> Optional[str]:
        """
        Regex-based replacement with context checking.

        Args:
            file_path: path to the file
            replacements: mapping {original symbol: new symbol}
            language: language identifier

        Returns:
            The rewritten content, or None if the file could not be read.
        """
        # Read the file content.
        content = self._read_file(file_path)
        if content is None:
            return None

        # Fetch the context analyzer for this language.
        analyzer = self.context_analyzers.get(language)
        if not analyzer:
            if self.logger:
                self.logger.log_warning(f"语言{language}没有上下文分析器，可能误替换")
            analyzer = self.context_analyzers.get('swift')  # fall back to Swift rules

        # Fetch the language-specific pattern rules.
        rules = self.language_rules.get(language)

        # Sort symbols longest-first so prefixes do not clobber longer names.
        sorted_symbols = sorted(replacements.keys(), key=len, reverse=True)

        for original in sorted_symbols:
            new_symbol = replacements[original]
            pattern = self._get_pattern(original, language, rules)

            if pattern:
                # Context-aware substitution: the factory binds the current
                # content snapshot so match offsets stay consistent.
                def create_replace_callback(current_analyzer, current_content, current_new_symbol):
                    def replace_callback(match):
                        pos = match.start()

                        # Context check — skip strings/comments/imports.
                        self.stats['context_checks'] += 1

                        if not current_analyzer.is_safe_to_replace(pos, current_content):
                            # Unsafe location; keep the original text.
                            self.stats['blocked_replacements'] += 1
                            return match.group(0)

                        # Safe to replace.
                        self.stats['safe_replacements'] += 1
                        return current_new_symbol

                    return replace_callback

                # Apply the substitution.
                content = pattern.sub(create_replace_callback(analyzer, content, new_symbol), content)

        return content

    def _token_based_replace(
        self,
        file_path: str,
        replacements: Dict[str, str],
        language: str,
        content: Optional[str] = None
    ) -> Optional[str]:
        """
        Token-based replacement driven by a simple lexer.

        Args:
            file_path: path to the file
            replacements: mapping {original symbol: new symbol}
            language: language identifier
            content: optional pre-read content (avoids a second disk read)

        Returns:
            The rewritten content, or None if the file could not be read.
        """
        if content is None:
            content = self._read_file(file_path)
        if content is None:
            return None

        # Lex the content into classified tokens.
        tokens = self._tokenize(content, language)

        # Replace identifier tokens only; strings/comments pass through.
        result = []
        for token_type, token_value, start, end in tokens:
            if token_type == 'identifier' and token_value in replacements:
                result.append(replacements[token_value])
            else:
                result.append(token_value)

        return ''.join(result)

    def _ast_based_replace(self, file_path: str, replacements: Dict[str, str],
                          language: str) -> Optional[str]:
        """
        AST-based replacement (most precise), with fallbacks.

        Falls back to token-based and finally regex replacement when the AST
        rewriter is unavailable or produces no change.

        Args:
            file_path: path to the file
            replacements: mapping {original symbol: new symbol}
            language: language identifier

        Returns:
            The rewritten content, or None if the file could not be read.
        """
        original_content = self._read_file(file_path)
        if original_content is None:
            return None

        # Ask the coordinator whether AST processing is worthwhile here.
        symbols = set(replacements.keys())
        if not self.coordinator.should_process_with_ast(file_path, symbols):
            if self.logger:
                self.logger.log_operation("AST协调", f"跳过AST处理: {file_path}")
            # Use the token-based path directly.
            return self._token_based_replace(file_path, replacements, language, content=original_content)

        processed_symbols: Dict[str, str] = {}
        remaining_symbols: Dict[str, str] = dict(replacements)

        # Try the AST rewriter first.
        rewriter = ASTRewriterFactory.create_rewriter(language, self.logger)

        if rewriter and rewriter.is_available():
            if self.logger:
                self.logger.log_operation("AST替换", f"使用{language} AST重写器")

            rewritten_content = rewriter.rewrite(file_path, replacements)

            if rewritten_content is not None and rewritten_content != original_content:
                # Heuristic coverage check: a symbol counts as processed when
                # its occurrences dropped and the new symbol's count rose.
                for original, new_symbol in replacements.items():
                    original_before = original_content.count(original)
                    original_after = rewritten_content.count(original)
                    new_before = original_content.count(new_symbol)
                    new_after = rewritten_content.count(new_symbol)

                    if original_before > 0 and original_after < original_before and new_after > new_before:
                        processed_symbols[original] = new_symbol
                        remaining_symbols.pop(original, None)

                if processed_symbols:
                    self.coordinator.record_ast_processing(file_path, processed_symbols, True)
                    if remaining_symbols and self.logger:
                        self.logger.log_warning(
                            f"AST重写仅覆盖 {len(processed_symbols)} 个符号，剩余 {len(remaining_symbols)} 个交由后续流程处理"
                        )
                    if self.logger:
                        self.logger.log_operation("AST替换", f"成功重写{file_path} 中 {len(processed_symbols)} 个符号")
                    return rewritten_content
                else:
                    if self.logger:
                        self.logger.log_warning("AST重写未检测到有效替换，继续回退流程")
            else:
                self.coordinator.record_ast_processing(file_path, replacements, False,
                                                      "AST重写无变化或失败")
                if self.logger:
                    self.logger.log_warning(f"AST重写无变化，尝试其他方法: {file_path}")
        else:
            if self.logger:
                self.logger.log_warning(f"{language} AST重写器不可用，回退其他策略")

        # AST rewriting failed or is unavailable: try token-based replacement.
        if self.logger:
            self.logger.log_operation("替换策略", "回退到token-based替换")

        token_based = self._token_based_replace(
            file_path,
            replacements,
            language,
            content=original_content
        )

        if token_based is not None and token_based != original_content:
            # Record the fallback handling.
            self.coordinator.record_regex_fallback(file_path, remaining_symbols)
            return token_based

        # Last resort: regex replacement.
        if self.logger:
            self.logger.log_operation("替换策略", "回退到regex替换")

        result = self._regex_based_replace(file_path, replacements, language)
        if result is not None and result != original_content:
            self.coordinator.record_regex_fallback(file_path, remaining_symbols)

        return result

    def _get_pattern(self, symbol: str, language: str,
                    rules: Optional['LanguageRules']) -> Optional[Pattern]:
        """
        Build (or fetch from cache) the compiled pattern for a symbol.

        Args:
            symbol: the symbol to match
            language: language identifier (part of the cache key)
            rules: language-specific rules, if any

        Returns:
            The compiled regex, or None if compilation failed.
        """
        # Check the cache first.
        cache_key = f"{language}_{symbol}"
        if cache_key in self.pattern_cache:
            return self.pattern_cache[cache_key]

        # Escape regex metacharacters in the symbol.
        escaped_symbol = re.escape(symbol)

        # Build the pattern via language rules when available.
        if rules:
            pattern_str = rules.build_pattern(escaped_symbol)
        else:
            # Default: plain word boundaries.
            pattern_str = r'\b' + escaped_symbol + r'\b'

        try:
            pattern = re.compile(pattern_str)
            self.pattern_cache[cache_key] = pattern
            return pattern
        except re.error as e:
            if self.logger:
                self.logger.log_error(f"正则编译失败 {symbol}: {e}")
            return None

    def _format_replacement(self, match: re.Match, new_symbol: str) -> str:
        """
        Format the replacement text, preserving the match's casing style.

        Args:
            match: the regex match object
            new_symbol: the replacement symbol

        Returns:
            The replacement text with casing adapted to the original.
        """
        original = match.group(0)

        # Guard against degenerate inputs (empty match or empty symbol):
        # indexing [0] below would raise IndexError otherwise.
        if not original or not new_symbol:
            return new_symbol

        if original.isupper():
            return new_symbol.upper()
        elif original[0].isupper():
            return new_symbol[0].upper() + new_symbol[1:]
        else:
            return new_symbol

    def _tokenize(self, content: str, language: str) -> List[Tuple[str, str, int, int]]:
        """
        Lex content into coarse tokens.

        Args:
            content: file content
            language: language identifier (currently unused by the lexer)

        Returns:
            Token list [(type, value, start offset, end offset)].
        """
        tokens = []

        # Simplified lexer; a language-specific lexer would be more accurate.
        token_pattern = re.compile(r'(\w+|"[^"]*"|\'[^\']*\'|//.*?$|/\*.*?\*/|.)', re.MULTILINE | re.DOTALL)

        for match in token_pattern.finditer(content):
            token = match.group(0)
            start = match.start()
            end = match.end()

            # Classify the token.
            if re.match(r'^\w+$', token):
                if token in ['class', 'func', 'var', 'let', 'import', 'struct', 'enum']:
                    token_type = 'keyword'
                else:
                    token_type = 'identifier'
            elif token.startswith('"') or token.startswith("'"):
                token_type = 'string'
            elif token.startswith('//') or token.startswith('/*'):
                token_type = 'comment'
            else:
                token_type = 'operator'

            tokens.append((token_type, token, start, end))

        return tokens

    def _detect_language(self, file_path: str) -> str:
        """
        Detect the language of a file.

        Args:
            file_path: path to the file

        Returns:
            Language identifier, 'xml' for XML-like extensions, else 'unknown'.
        """
        language = detect_language_for_file(file_path)
        if language:
            return language

        ext = Path(file_path).suffix.lower()
        if ext in get_extension_group('xml_like'):
            return 'xml'
        return 'unknown'

    def _read_file(self, file_path: str) -> Optional[str]:
        """
        Read a file's content as UTF-8 text.

        Args:
            file_path: path to the file

        Returns:
            The content, or None on read failure.
        """
        try:
            if self.use_mmap and os.path.getsize(file_path) > 1024 * 1024:  # mmap files larger than 1MB
                # Open read-only: 'r+b' would fail on read-only files and
                # write access is not needed just to read the content.
                with open(file_path, 'rb') as f:
                    with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mmapped:
                        return mmapped.read().decode('utf-8')
            else:
                with open(file_path, 'r', encoding='utf-8') as f:
                    return f.read()
        except Exception as e:
            if self.logger:
                self.logger.log_error(f"读取文件失败 {file_path}: {e}")
            return None

    def _write_file(self, file_path: str, content: str):
        """
        Write content to a file, with a backup-and-restore safety net.

        Args:
            file_path: path to the file
            content: the content to write

        Raises:
            Exception: re-raises any write failure after restoring the backup.
        """
        import shutil

        # Create a backup of the existing file, if any.
        backup_path = f"{file_path}.bak"
        had_original = os.path.exists(file_path)
        if had_original:
            shutil.copy2(file_path, backup_path)

        try:
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(content)
            if had_original and os.path.exists(backup_path):
                os.remove(backup_path)
        except Exception:
            # Restore the backup, then re-raise with the original traceback.
            if had_original and os.path.exists(backup_path):
                shutil.move(backup_path, file_path)
            raise
        finally:
            # Belt-and-braces: never leave a stale backup behind.
            if os.path.exists(backup_path):
                os.remove(backup_path)

    def _count_replacements(self, content: str, replacements: Dict[str, str]) -> Dict[str, int]:
        """
        Count replacements by occurrences of the new symbols.

        NOTE: this counts all occurrences of each new symbol in the final
        content, so pre-existing occurrences inflate the numbers — it is a
        heuristic, not an exact replacement count.

        Args:
            content: content after replacement
            replacements: mapping {original symbol: new symbol}

        Returns:
            Mapping {original symbol: occurrence count of its new symbol}.
        """
        counts = {}
        for original, new_symbol in replacements.items():
            counts[original] = content.count(new_symbol)
        return counts

    def _update_stats(self, result: ReplacementResult):
        """
        Fold one file's result into the running statistics.

        Args:
            result: the replacement result
        """
        self.stats['total_files'] += 1
        self.stats['total_replacements'] += result.total_replacements
        self.stats['total_time'] += result.time_taken

    def optimize_replacement_order(self, replacements: Dict[str, str]) -> List[Tuple[str, str]]:
        """
        Order replacements so they cannot corrupt one another.

        If symbol A occurs inside the replacement value of symbol B, then A
        must be replaced before B: replacing B first would inject an
        occurrence of A that a later pass would then mangle.

        (The previous implementation added conflict edges symmetrically —
        both (A, B) and (B, A) for every conflicting pair — so the graph
        always contained a 2-cycle and the topological sort could never
        succeed. It also required the third-party networkx package; the
        stdlib graphlib module suffices.)

        Args:
            replacements: mapping {original symbol: new symbol}

        Returns:
            List of (original, new) pairs in a safe application order.
        """
        from graphlib import TopologicalSorter, CycleError

        # Directed ordering constraints: (before, after).
        constraints: List[Tuple[str, str]] = []
        for old1 in replacements:
            for old2, new2 in replacements.items():
                if old1 != old2 and old1 in new2:
                    # old1 appears in old2's replacement -> replace old1 first.
                    constraints.append((old1, old2))

        if constraints:
            sorter = TopologicalSorter()
            for before, after in constraints:
                sorter.add(after, before)
            try:
                order = list(sorter.static_order())
                # Append symbols that had no ordering constraint.
                for symbol in replacements:
                    if symbol not in order:
                        order.append(symbol)
            except CycleError:
                # Circular dependency: fall back to longest-first ordering.
                order = sorted(replacements.keys(), key=len, reverse=True)
        else:
            # No conflicts: longest symbols first avoids prefix clashes.
            order = sorted(replacements.keys(), key=len, reverse=True)

        return [(symbol, replacements[symbol]) for symbol in order]

    def get_statistics(self) -> Dict[str, Any]:
        """
        Return a snapshot of engine statistics.

        Returns:
            Statistics dict, including derived averages and cache sizes.
        """
        avg_time = (self.stats['total_time'] / self.stats['total_files']
                   if self.stats['total_files'] > 0 else 0)

        return {
            **self.stats,
            'avg_time_per_file': avg_time,
            'pattern_cache_size': len(self.pattern_cache),
            'replacement_cache_size': len(self.replacement_cache)
        }


class LanguageRules:
    """Base class for language-specific replacement rules.

    Subclasses may override pattern construction and the replacement veto.
    """

    def build_pattern(self, symbol: str) -> str:
        """Return a word-boundary regex matching *symbol*."""
        return r'\b{}\b'.format(symbol)

    def should_replace(self, match: re.Match, content: str) -> bool:
        """Base rules never veto a replacement."""
        return True


class SwiftReplacementRules(LanguageRules):
    """Swift-specific replacement rules."""

    def build_pattern(self, symbol: str) -> str:
        # Swift identifiers may be Unicode, so use explicit lookaround
        # instead of \b; a preceding dot (method/property access) is still
        # allowed to precede the match.
        return rf'(?<![\w]){symbol}(?![\w])'

    def should_replace(self, match: re.Match, content: str) -> bool:
        prefix = content[:match.start()]

        # String heuristic: an odd number of unescaped double quotes before
        # the match position means we are inside a double-quoted literal.
        if (prefix.count('"') - prefix.count('\\"')) % 2 == 1:
            return False

        # Same heuristic for single-quoted literals.
        if (prefix.count("'") - prefix.count("\\'")) % 2 == 1:
            return False

        # Restrict the comment check to the current line, up to the match.
        # rfind returns -1 when there is no newline, so +1 yields 0.
        line_start = prefix.rfind('\n') + 1
        current_line = content[line_start:match.start()]

        # Line-comment check: a // marker counts only when it is not itself
        # inside a string (an even number of quotes precedes it).
        marker = current_line.find('//')
        if marker != -1 and current_line[:marker].count('"') % 2 == 0:
            return False  # inside a comment — do not replace

        return True


class ObjCReplacementRules(LanguageRules):
    """Objective-C replacement rules."""

    def build_pattern(self, symbol: str) -> str:
        if ':' not in symbol:
            # Plain identifier: simple word-boundary match.
            return r'\b' + symbol + r'\b'
        # Selector with colons: allow an optional "(type) arg" segment after
        # each colon so full method declarations also match.
        selector_body = symbol.replace(':', r':(?:\s*\([^)]*\)\s*\w+\s*)?')
        return r'(?<![.\w])' + selector_body + r'(?![.\w])'


class KotlinReplacementRules(LanguageRules):
    """Kotlin replacement rules."""

    def build_pattern(self, symbol: str) -> str:
        # Backtick-quoted identifiers are matched literally (regex-escaped);
        # everything else gets plain word boundaries.
        is_backticked = symbol.startswith('`') and symbol.endswith('`')
        if is_backticked:
            return re.escape(symbol)
        return r'\b' + symbol + r'\b'


class JavaReplacementRules(LanguageRules):
    """Java replacement rules."""

    def build_pattern(self, symbol: str) -> str:
        return r'\b%s\b' % symbol

    def should_replace(self, match: re.Match, content: str) -> bool:
        # Locate the start of the line containing the match; rfind returns
        # -1 when there is no preceding newline, so +1 yields offset 0.
        start_of_line = content.rfind('\n', 0, match.start()) + 1
        line_text = content[start_of_line:match.end()]

        # Do not touch symbols appearing on import lines.
        return not line_text.strip().startswith('import')
