#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
依赖分析器 - 分析文件间的依赖关系
用于确保并行处理时的正确顺序
"""

import os
from collections import defaultdict, deque
from pathlib import Path
from typing import Any, Dict, List, Optional, Set

import networkx as nx

from ..builtin_config_module.builtin_config import BuiltInConfig
from ..utils_module.language_utils import detect_language_for_file
from ..utils_module.logger import ObfuscationLogger
from ..utils_module.pbxproj_utils import extract_build_metadata
from .dependency_strategies import (
    BaseLanguageDependencyStrategy,
    SwiftDependencyStrategy,
    ObjectiveCDependencyStrategy,
    KotlinDependencyStrategy,
    JavaDependencyStrategy,
)


class DependencyAnalyzer:
    """File dependency analyzer.

    Builds a directed graph of inter-file dependencies so that a parallel
    processing pipeline can order files correctly.
    """

    def __init__(self, config: BuiltInConfig, logger: ObfuscationLogger):
        """
        Initialize the dependency analyzer.

        Args:
            config: Configuration object.
            logger: Log recorder.
        """
        self.config = config
        self.logger = logger

        # Directed graph; an edge (file -> dep) means `file` depends on `dep`.
        self.dependency_graph = nx.DiGraph()

        # file path -> module name
        self.file_to_module: Dict[str, str] = {}
        # module name -> set of file paths
        self.module_to_files: Dict[str, Set[str]] = defaultdict(set)
        # per-file resolved dependency cache
        self.dependency_cache: Dict[str, List[str]] = {}
        # dependency cycles found by the last analysis
        self.circular_dependencies: List[List[str]] = []

        # One strategy per supported language; keys must match file-group keys.
        strategy_classes = {
            'swift': SwiftDependencyStrategy,
            'objc': ObjectiveCDependencyStrategy,
            'kotlin': KotlinDependencyStrategy,
            'java': JavaDependencyStrategy,
        }
        self.language_strategies: Dict[str, BaseLanguageDependencyStrategy] = {
            lang: cls(logger) for lang, cls in strategy_classes.items()
        }

        # language -> files every file of that language implicitly depends on
        self.global_language_dependencies: Dict[str, Set[str]] = defaultdict(set)
        # language -> header search directories
        self.language_header_search_paths: Dict[str, Set[str]] = defaultdict(set)
        # guard so project metadata (pbxproj/config) is loaded only once
        self._project_metadata_loaded = False

    def analyze_dependencies(self, file_groups: Dict[str, List[str]],
                           symbols: Dict) -> Dict[str, List[str]]:
        """
        Analyze inter-file dependencies for every grouped file.

        Args:
            file_groups: Mapping of language key (e.g. 'swift', 'objc') to
                lists of file paths.
            symbols: Symbol information; per-file entries may carry a
                'dependencies' list (consumed by _extract_dependencies).

        Returns:
            Mapping {file: [files it depends on]}. Also populates
            self.dependency_graph, self.dependency_cache and
            self.circular_dependencies as side effects.
        """
        import time
        start_time = time.time()

        # Total file count, used only for progress reporting.
        total_files = sum(len(files) for files in file_groups.values())
        self.logger.log_operation("依赖分析", f"开始分析 {total_files} 个文件的依赖关系")

        dependencies = {}

        self._ensure_project_metadata()

        # Build file<->module mappings first; strategies use them to resolve imports.
        self._build_module_mappings(file_groups)

        # Build the symbol-definition index once (O(n) instead of O(n) per lookup).
        # NOTE(review): the index is currently only used for the log line below —
        # confirm whether it was meant to feed _extract_dependencies.
        self.logger.log_operation("依赖分析", "构建符号索引...")
        symbol_to_file = self._build_symbol_index(file_groups, symbols)
        self.logger.log_operation("依赖分析", f"符号索引完成，共 {len(symbol_to_file)} 个符号")

        # Analyze dependencies per language; languages without a strategy are skipped.
        processed = 0
        for lang, files in file_groups.items():
            strategy = self.language_strategies.get(lang)
            if not strategy:
                continue

            lang_start = time.time()

            for i, file_path in enumerate(files):
                # Progress log every 100 processed files.
                if processed % 100 == 0 and processed > 0:
                    progress = (processed / total_files) * 100
                    elapsed = time.time() - start_time
                    self.logger.log_operation(
                        "依赖分析进度",
                        f"{progress:.1f}% ({processed}/{total_files} 文件，耗时 {elapsed:.1f}秒)"
                    )

                # Reuse cached results for files seen in a previous call.
                if file_path in self.dependency_cache:
                    deps = self.dependency_cache[file_path]
                else:
                    deps = self._extract_dependencies(file_path, strategy, symbols)
                    self.dependency_cache[file_path] = deps

                dependencies[file_path] = deps

                # Mirror the result into the graph: edge file_path -> dep.
                if file_path not in self.dependency_graph:
                    self.dependency_graph.add_node(file_path)

                processed += 1

                for dep in deps:
                    if dep not in self.dependency_graph:
                        self.dependency_graph.add_node(dep)
                    self.dependency_graph.add_edge(file_path, dep)

            lang_elapsed = time.time() - lang_start
            self.logger.log_operation(
                f"{lang}依赖分析",
                f"完成 {len(files)} 个文件，耗时 {lang_elapsed:.2f}秒"
            )

        # Record any cycles into self.circular_dependencies.
        self._detect_circular_dependencies()

        total_elapsed = time.time() - start_time
        self.logger.log_operation(
            "依赖分析完成",
            f"分析了 {len(dependencies)} 个文件的依赖关系，"
            f"发现 {len(self.circular_dependencies)} 个循环依赖，"
            f"总耗时 {total_elapsed:.2f}秒"
        )

        return dependencies

    def _extract_dependencies(
        self,
        file_path: str,
        strategy: BaseLanguageDependencyStrategy,
        symbols: Dict,
    ) -> List[str]:
        """
        Extract the dependencies of a single file.

        Dependencies come from three sources: import statements resolved by
        the language strategy, an optional per-file 'dependencies' list in
        `symbols`, and language-wide global dependencies (e.g. bridging /
        prefix headers collected from the project metadata).

        Args:
            file_path: Path of the file to analyze.
            strategy: Language strategy used to parse and resolve imports.
            symbols: Symbol info; symbols[file_path]['dependencies'] may list
                extra dependency paths.

        Returns:
            Deduplicated list of dependency file paths (never containing
            file_path itself); [] when the file is missing/unreadable or no
            strategy is given.
        """
        # Guard before any I/O — reading the file is pointless without a strategy.
        # (Original code read the file first and checked `strategy` afterwards.)
        if not strategy or not os.path.exists(file_path):
            return []

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception as e:
            self.logger.log_error(f"读取文件 {file_path} 失败: {e}")
            return []

        dependencies = set()

        # Resolve import statements to concrete files.
        for import_name in strategy.extract_imports(content):
            dep_files = strategy.resolve_import(
                import_name, file_path, self.module_to_files
            )
            dependencies.update(dep_files)

        # Extra dependencies supplied through the symbol table.
        if symbols and file_path in symbols:
            file_symbols = symbols[file_path]
            if 'dependencies' in file_symbols:
                for dep in file_symbols['dependencies']:
                    if isinstance(dep, str) and os.path.exists(dep):
                        dependencies.add(dep)

        # Language-wide implicit dependencies (bridging / prefix headers).
        for dep in self.global_language_dependencies.get(strategy.language, set()):
            if dep != file_path:
                dependencies.add(dep)

        # A file never depends on itself.
        dependencies.discard(file_path)

        return list(dependencies)

    def extract_imports(self, file_path: str) -> Set[str]:
        """
        Public entry point: read a file and return the modules it imports.

        Args:
            file_path: Path of the file to parse.

        Returns:
            Set of imported module names; empty when the language cannot be
            detected, no strategy exists for it, or the file cannot be read.
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                source = f.read()

            language = detect_language_for_file(file_path)
            strategy = self.language_strategies.get(language) if language else None
            if strategy is None:
                return set()

            return strategy.extract_imports(source)
        except Exception as e:
            if self.logger:
                self.logger.log_warning(f"提取导入失败 {file_path}: {e}")
            return set()

    def _build_module_mappings(self, file_groups: Dict[str, List[str]]):
        """
        Populate file_to_module / module_to_files from the grouped files.

        Languages without a registered strategy are skipped, as are files
        for which the strategy cannot derive a module name.

        Args:
            file_groups: Mapping of language key to file paths.
        """
        for lang, paths in file_groups.items():
            strategy = self.language_strategies.get(lang)
            if strategy is None:
                continue

            for path in paths:
                module = strategy.extract_module_name(path)
                if not module:
                    continue
                self.file_to_module[path] = module
                self.module_to_files[module].add(path)

    def _ensure_project_metadata(self):
        """
        Load project metadata (pbxproj data plus manual config overrides)
        exactly once; subsequent calls are no-ops.
        """
        if self._project_metadata_loaded:
            return
        self._project_metadata_loaded = True

        # config.project_path may be missing or empty; normalize to ''.
        project_path = getattr(self.config, 'project_path', '') or ''
        if project_path:
            self._load_pbxproj_metadata(project_path)
        self._apply_manual_overrides(project_path)
        self._apply_header_search_paths()

    def _apply_header_search_paths(self):
        """
        Push the collected Objective-C header search paths (sorted for
        determinism) into the ObjC strategy, when it supports them.
        """
        strategy = self.language_strategies.get('objc')
        if strategy is None or not hasattr(strategy, 'set_header_search_paths'):
            return
        strategy.set_header_search_paths(
            sorted(self.language_header_search_paths.get('objc', set()))
        )

    def _load_pbxproj_metadata(self, project_path: str):
        """
        Load build metadata from every Xcode project.pbxproj under project_path.

        Bridging headers become implicit Swift dependencies, prefix headers
        implicit Objective-C dependencies, and header search directories are
        recorded for both languages. Missing paths are warned about once.

        Args:
            project_path: Xcode project root path.
        """
        project_root = Path(project_path)
        if not project_root.exists():
            return

        # Paths already warned about, to avoid duplicate log lines.
        seen_missing: Set[str] = set()

        for pbxproj in project_root.rglob("*.xcodeproj/project.pbxproj"):
            metadata = extract_build_metadata(str(pbxproj))
            if not metadata:
                continue

            for header in metadata.bridging_headers:
                self._register_metadata_path(
                    header, os.path.exists, seen_missing, "桥接头文件不存在",
                    lambda p: self.global_language_dependencies['swift'].add(p),
                )

            for header in metadata.prefix_headers:
                self._register_metadata_path(
                    header, os.path.exists, seen_missing, "前缀头文件不存在",
                    lambda p: self.global_language_dependencies['objc'].add(p),
                )

            for search_path in metadata.header_search_paths:
                self._register_metadata_path(
                    search_path, os.path.isdir, seen_missing, "头文件搜索路径不存在",
                    self._add_shared_search_path,
                )

    def _add_shared_search_path(self, path: str):
        """Record a header search directory for both ObjC and Swift."""
        self.language_header_search_paths['objc'].add(path)
        self.language_header_search_paths['swift'].add(path)

    def _register_metadata_path(self, path: str, predicate, seen_missing: Set[str],
                                warn_prefix: str, register):
        """
        Register a metadata path if it passes `predicate`; warn once otherwise.

        Args:
            path: Candidate path from the pbxproj metadata.
            predicate: Existence check (os.path.exists or os.path.isdir).
            seen_missing: Mutable set of paths already warned about.
            warn_prefix: Warning message prefix (logged as "<prefix>: <path>").
            register: Callback invoked with the path when it is valid.
        """
        if self._is_excluded_path(path):
            return
        if predicate(path):
            register(path)
        elif path not in seen_missing:
            seen_missing.add(path)
            self.logger.log_warning(f"{warn_prefix}: {path}")

    def _apply_manual_overrides(self, project_path: str):
        """
        Apply manual overrides from config.analysis.

        Two settings are honored: extra_global_headers ({language: [paths]},
        added to global_language_dependencies) and extra_header_search_paths
        (directories added to both the 'objc' and 'swift' search path sets).
        Relative paths are resolved against project_path.

        Args:
            project_path: Project root used for relative path resolution.
        """
        analysis_cfg = getattr(self.config, 'analysis', None)
        if not analysis_cfg:
            return

        project_root = Path(project_path) if project_path else None

        # Per-language extra global dependency files; missing files only warn.
        extra_headers = getattr(analysis_cfg, 'extra_global_headers', {}) or {}
        for language, paths in extra_headers.items():
            for raw_path in paths or []:
                normalized = self._normalize_path(raw_path, project_root)
                if not normalized:
                    continue
                if os.path.exists(normalized):
                    self.global_language_dependencies[language].add(normalized)
                else:
                    self.logger.log_warning(f"额外全局依赖文件不存在: {normalized}")

        # Extra header search directories, shared by ObjC and Swift.
        extra_search_paths = getattr(analysis_cfg, 'extra_header_search_paths', []) or []
        for raw_path in extra_search_paths:
            normalized = self._normalize_path(raw_path, project_root)
            if not normalized:
                continue
            if os.path.isdir(normalized):
                self.language_header_search_paths['objc'].add(normalized)
                self.language_header_search_paths['swift'].add(normalized)
            else:
                self.logger.log_warning(f"额外头文件搜索路径不存在: {normalized}")

    def _normalize_path(self, raw_path: str, project_root: Optional[Path]) -> Optional[str]:
        """
        Normalize a path: expand the user directory and resolve relative
        paths against the project root (or the CWD when no root is given).

        Args:
            raw_path: Raw path string (may be empty).
            project_root: Project root for relative path resolution, or None.

        Returns:
            Absolute normalized path string, or None for an empty input.
        """
        if not raw_path:
            return None

        candidate = Path(raw_path).expanduser()
        if not candidate.is_absolute():
            candidate = (project_root or Path.cwd()) / candidate

        try:
            return str(candidate.resolve(strict=False))
        except Exception:
            # resolve() can fail on exotic paths; fall back to the joined form.
            return str(candidate)

    def _is_excluded_path(self, path: str) -> bool:
        """
        Return True when any path component matches a configured excluded
        directory (config.file_filters.exclude_directories).

        Args:
            path: Path to check; empty paths are never excluded.
        """
        if not path:
            return False

        filters = getattr(self.config, 'file_filters', None)
        excluded = getattr(filters, 'exclude_directories', None) if filters else None
        if not excluded:
            return False

        components = set(Path(path).parts)
        return not components.isdisjoint(excluded)

    def _detect_circular_dependencies(self):
        """
        Detect all elementary cycles in the dependency graph and record them
        in self.circular_dependencies; the first few are logged.

        Detection is best-effort: any failure is logged and analysis
        continues with an empty cycle list.
        """
        self.circular_dependencies = []

        try:
            # NetworkX enumerates every elementary cycle.
            cycles = list(nx.simple_cycles(self.dependency_graph))
        except Exception as e:
            # Was a bare `except: pass` — keep best-effort semantics but make
            # the failure visible instead of silently swallowing everything.
            self.logger.log_warning(f"循环依赖检测失败: {e}")
            return

        self.circular_dependencies = cycles

        if cycles:
            self.logger.log_warning(
                f"检测到 {len(cycles)} 个循环依赖"
            )
            for cycle in cycles[:5]:  # only show the first 5
                cycle_str = ' -> '.join(cycle + [cycle[0]])
                self.logger.log_warning(f"循环: {cycle_str}")

    def get_dependency_layers(self) -> List[List[str]]:
        """
        Group files into dependency layers for parallel processing.

        Files in one layer can be processed concurrently. For an acyclic
        graph, layers come from a BFS layering (_compute_node_layers); when
        cycles exist, each strongly connected component becomes one layer,
        ordered by a topological sort of the condensed SCC graph.

        Returns:
            Ordered list of layers (each a list of file paths);
            [] for an empty graph.
        """
        if not self.dependency_graph.nodes():
            return []

        layers = []

        # With cycles present, collapse each cycle into a single layer via SCCs.
        if self.has_circular_dependency():
            # Every strongly connected component is handled as one unit.
            sccs = list(nx.strongly_connected_components(self.dependency_graph))

            # Build the condensed graph whose nodes are SCC indices.
            scc_graph = nx.DiGraph()
            scc_mapping = {}  # node -> SCC index

            for i, scc in enumerate(sccs):
                for node in scc:
                    scc_mapping[node] = i
                scc_graph.add_node(i)

            # Edges between distinct SCCs only (intra-SCC edges are dropped).
            for u, v in self.dependency_graph.edges():
                scc_u = scc_mapping[u]
                scc_v = scc_mapping[v]
                if scc_u != scc_v:
                    scc_graph.add_edge(scc_u, scc_v)

            # The condensed SCC graph is a DAG, so this sort should succeed.
            try:
                scc_order = list(nx.topological_sort(scc_graph))

                # Emit one layer per SCC, in topological order.
                for scc_id in scc_order:
                    layer = list(sccs[scc_id])
                    if layer:
                        layers.append(layer)
            except nx.NetworkXUnfeasible:
                # Defensive: fall back to a fully serial layering.
                self._fallback_layering(layers)
        else:
            # Acyclic graph: derive layers directly from BFS levels.
            try:
                # Layer index per node.
                node_layers = self._compute_node_layers()

                # Group nodes that share a layer index.
                layer_dict = defaultdict(list)
                for node, layer in node_layers.items():
                    layer_dict[layer].append(node)

                # Append groups in ascending layer order.
                for layer_num in sorted(layer_dict.keys()):
                    layers.append(layer_dict[layer_num])

            except nx.NetworkXUnfeasible:
                self._fallback_layering(layers)

        return layers

    def _compute_node_layers(self) -> Dict[str, int]:
        """
        Assign a BFS layer index to every node of the dependency graph.

        Roots (nodes with no incoming edges) get layer 0; a node is layered
        only once all its predecessors are, at (max predecessor layer + 1).
        Nodes never reached this way (e.g. cycle members) are appended at
        the end with distinct increasing layer numbers.

        Returns:
            Mapping node -> layer index.
        """
        node_layers = {}

        # Roots: nodes with no incoming edges.
        roots = [
            node for node in self.dependency_graph.nodes()
            if self.dependency_graph.in_degree(node) == 0
        ]

        if not roots:
            # No true roots (graph is fully cyclic): start from the nodes
            # with the minimum in-degree instead.
            min_in_degree = min(
                self.dependency_graph.in_degree(node)
                for node in self.dependency_graph.nodes()
            )
            roots = [
                node for node in self.dependency_graph.nodes()
                if self.dependency_graph.in_degree(node) == min_in_degree
            ]

        # BFS from the roots, assigning layer numbers.
        visited = set()
        queue = deque([(root, 0) for root in roots])

        while queue:
            node, layer = queue.popleft()

            if node in visited:
                continue

            visited.add(node)
            node_layers[node] = layer

            # Enqueue successors once all their predecessors are layered.
            for successor in self.dependency_graph.successors(node):
                if successor not in visited:
                    # Only schedule when every predecessor already has a layer.
                    predecessors = list(self.dependency_graph.predecessors(successor))
                    if all(p in visited for p in predecessors):
                        # Layer = max predecessor layer + 1.
                        max_pred_layer = max(
                            node_layers.get(p, 0) for p in predecessors
                        )
                        queue.append((successor, max_pred_layer + 1))

        # Nodes unreachable from the roots (typically cycle members) go last,
        # each in its own trailing layer.
        for node in self.dependency_graph.nodes():
            if node not in node_layers:
                node_layers[node] = len(node_layers)  # append at the end

        return node_layers

    def _fallback_layering(self, layers: List[List[str]]):
        """
        Most conservative fallback: append one single-file layer per node,
        i.e. fully serial processing. Nodes are sorted for determinism.

        Args:
            layers: Layer list to extend in place.
        """
        for node in sorted(self.dependency_graph.nodes()):
            layers.append([node])

    def has_circular_dependency(self) -> bool:
        """
        Whether the last analysis recorded at least one dependency cycle.

        Returns:
            True when self.circular_dependencies is non-empty.
        """
        return bool(self.circular_dependencies)

    def get_circular_dependencies(self) -> List[List[str]]:
        """
        Return every dependency cycle recorded by the last analysis.

        Returns:
            List of cycles, each a list of file paths.
        """
        return self.circular_dependencies

    def get_dependency_stats(self) -> Dict[str, Any]:
        """
        Summarize the dependency graph.

        Returns:
            Dict with: total_files (node count), total_dependencies (edge
            count), circular_dependencies (cycle count), isolated_files
            (nodes with degree 0), max_dependencies (max out-degree) and
            avg_dependencies (mean out-degree, float).
        """
        # Fixed: annotation previously used the builtin `any` instead of typing.Any.
        stats = {
            'total_files': self.dependency_graph.number_of_nodes(),
            'total_dependencies': self.dependency_graph.number_of_edges(),
            'circular_dependencies': len(self.circular_dependencies),
            'isolated_files': 0,
            'max_dependencies': 0,
            'avg_dependencies': 0,
        }

        nodes = list(self.dependency_graph.nodes())
        if nodes:
            # Isolated files: neither incoming nor outgoing edges.
            stats['isolated_files'] = sum(
                1 for node in nodes if self.dependency_graph.degree(node) == 0
            )

            # Out-degree = number of direct dependencies of a file.
            out_degrees = [self.dependency_graph.out_degree(node) for node in nodes]
            stats['max_dependencies'] = max(out_degrees)
            stats['avg_dependencies'] = sum(out_degrees) / len(out_degrees)

        return stats

    def visualize_dependencies(self, output_path: Optional[str] = None) -> Optional[str]:
        """
        Render the dependency graph in DOT format.

        Args:
            output_path: Optional file path; when given, the DOT text is
                also written there. (Annotation fixed: default is None, so
                the type is Optional[str], not str.)

        Returns:
            DOT-formatted string, or None when generation fails (e.g. the
            optional pydot backend used by nx.drawing.nx_pydot is missing).
        """
        try:
            # Convert the graph via the pydot bridge.
            dot_data = nx.drawing.nx_pydot.to_pydot(self.dependency_graph)

            if output_path:
                # Persist the DOT text to disk.
                dot_data.write_raw(output_path)
                self.logger.log_operation(
                    "依赖可视化",
                    f"依赖图已保存到 {output_path}"
                )

            return dot_data.to_string()

        except Exception as e:
            self.logger.log_error(f"生成依赖图失败: {e}")
            return None

    def optimize_processing_order(self, files: List[str]) -> List[str]:
        """
        Return `files` in a dependency-respecting order when possible.

        Args:
            files: Files to order.

        Returns:
            Topologically sorted order over the induced subgraph, or the
            input order (with a warning) when that subgraph has a cycle.
        """
        induced = self.dependency_graph.subgraph(files)

        try:
            ordered = list(nx.topological_sort(induced))
        except nx.NetworkXUnfeasible:
            # A cycle makes topological sorting impossible.
            self.logger.log_warning("文件存在循环依赖，使用原始顺序")
            return files

        return ordered

    def _build_symbol_index(self, file_groups: Dict[str, List[str]],
                           symbols: Dict) -> Dict[str, str]:
        """
        Build a symbol-name -> defining-file index (performance optimization:
        one O(n) pass instead of an O(n) scan per lookup).

        Two symbol layouts are supported: a legacy one keyed by language and
        a newer one keyed by symbol type. For duplicate symbol names, the
        first file seen wins (setdefault).

        Args:
            file_groups: File grouping; its keys are used to sniff the layout.
            symbols: Symbol information in either supported layout.

        Returns:
            Mapping {symbol name: file path}.
        """
        symbol_to_file: Dict[str, str] = {}
        symbols = symbols or {}
        file_group_languages = set(file_groups.keys())

        def record(symbol_name: str, file_path: Optional[str]):
            # First definition wins; empty names/paths are ignored.
            if symbol_name and file_path and file_path not in ("", None):
                symbol_to_file.setdefault(symbol_name, file_path)

        # Legacy layout: {language: {symbol_type: {symbol_name: [paths]}}}
        # Detected when every top-level key matches a known language group.
        is_legacy_structure = bool(symbols) and file_group_languages and all(
            key in file_group_languages for key in symbols.keys()
        )

        if is_legacy_structure:
            for lang_symbols in symbols.values():
                if not isinstance(lang_symbols, dict):
                    continue

                for symbol_dict in lang_symbols.values():
                    if not isinstance(symbol_dict, dict):
                        continue

                    for symbol_name, file_paths in symbol_dict.items():
                        # Value may be a list of strings/dicts, a dict, or a string.
                        resolved_path = None
                        if isinstance(file_paths, list) and file_paths:
                            first = file_paths[0]
                            if isinstance(first, str):
                                resolved_path = first
                            elif isinstance(first, dict):
                                resolved_path = first.get('file')
                        elif isinstance(file_paths, dict):
                            resolved_path = file_paths.get('file')
                        elif isinstance(file_paths, str):
                            resolved_path = file_paths

                        record(symbol_name, resolved_path)

            # If the legacy pass yielded anything, skip the new-layout pass.
            if symbol_to_file:
                return symbol_to_file

        # New layout: {symbol_type: {symbol_name: {'file': path, ...}}}
        for symbol_dict in symbols.values():
            if not isinstance(symbol_dict, dict):
                continue

            for symbol_name, payload in symbol_dict.items():
                resolved_path = None

                if isinstance(payload, dict):
                    # Prefer an explicit 'file'; otherwise scan locations/files.
                    resolved_path = payload.get('file')
                    if not resolved_path:
                        locations = payload.get('locations') or payload.get('files') or []
                        for location in locations:
                            if isinstance(location, dict) and location.get('file'):
                                resolved_path = location['file']
                                break
                            if isinstance(location, str):
                                resolved_path = location
                                break
                elif isinstance(payload, (list, tuple)):
                    # Take the first usable entry.
                    for item in payload:
                        if isinstance(item, str):
                            resolved_path = item
                            break
                        if isinstance(item, dict) and item.get('file'):
                            resolved_path = item['file']
                            break
                elif isinstance(payload, str):
                    resolved_path = payload

                record(symbol_name, resolved_path)

        return symbol_to_file

    def clear_cache(self):
        """
        Reset the per-analysis state: dependency cache, module mappings,
        recorded cycles and the dependency graph.

        NOTE(review): project metadata (_project_metadata_loaded,
        global_language_dependencies, language_header_search_paths) is left
        intact here — presumably intentional since it derives from config,
        but confirm if the analyzer is reused across projects.
        """
        for container in (
            self.dependency_cache,
            self.file_to_module,
            self.module_to_files,
            self.circular_dependencies,
            self.dependency_graph,
        ):
            container.clear()

        self.logger.log_operation("依赖分析", "缓存已清除")
