#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
统一项目分析器
整合项目分析、增量分析、依赖分析和平台检测
"""

import json
import os
import time
from pathlib import Path
from typing import Dict, List, Optional

from ..interfaces_module.analyzer import (
    IProjectAnalyzer,
    AnalysisResult,
    AnalysisOptions,
    FileInfo,
    DependencyInfo,
    Platform
)
from ..builtin_config_module.builtin_config import BuiltInConfig
from ..utils_module.logger import ObfuscationLogger
from ..utils_module.language_utils import SUPPORTED_EXTENSIONS, detect_language_for_file


class ProjectAnalyzer(IProjectAnalyzer):
    """
    Unified project analyzer.

    Integrates project analysis, incremental analysis, dependency analysis
    and platform detection by composing optional pluggable components.
    """

    def __init__(self, config: BuiltInConfig, logger: ObfuscationLogger):
        """
        Initialize the unified project analyzer.

        Args:
            config: Configuration object passed through to components.
            logger: Logger used for operation and warning messages.
        """
        self.config = config
        self.logger = logger

        # Optional analysis components keyed by role:
        # 'incremental', 'dependency', 'platform', 'symbol'.
        # Missing keys mean the component failed to load (best-effort).
        self.components: Dict[str, object] = {}
        self._init_components()

        # In-memory cache of AnalysisResult objects keyed by project path.
        self.cache: Dict[str, AnalysisResult] = {}
    def _init_components(self):
        """Load the optional analysis components; every failure is non-fatal.

        Each component is imported lazily and registered under a role key in
        ``self.components``; a load failure only produces a warning so the
        analyzer degrades gracefully.
        """
        # Incremental analysis: needs its own cache manager, and receives
        # this instance so it does not construct a second ProjectAnalyzer.
        try:
            from .incremental_analyzer import IncrementalAnalyzer
            from ..cache_module.project_cache_manager import ProjectCacheManager

            manager = ProjectCacheManager(self.config, self.logger, namespace="analysis")
            inc = IncrementalAnalyzer(manager, self.logger)
            inc.init_analyzers(self.config, self.logger, project_analyzer=self)
            self.components['incremental'] = inc
            self.logger.log_operation("组件加载", "增量分析组件已加载")
        except Exception as exc:
            self.logger.log_warning(f"增量分析组件加载失败: {exc}")

        # Dependency analysis.
        try:
            from .dependency_analyzer import DependencyAnalyzer

            self.components['dependency'] = DependencyAnalyzer(self.config, self.logger)
            self.logger.log_operation("组件加载", "依赖分析组件已加载")
        except Exception as exc:
            self.logger.log_warning(f"依赖分析组件加载失败: {exc}")

        # Platform detection (stateless, takes no config).
        try:
            from .platform_detector import PlatformDetector

            self.components['platform'] = PlatformDetector()
            self.logger.log_operation("组件加载", "平台检测组件已加载")
        except Exception as exc:
            self.logger.log_warning(f"平台检测组件加载失败: {exc}")

        # Symbol extraction.
        try:
            from ..extractors_module.symbol_extractor import SymbolExtractor

            self.components['symbol'] = SymbolExtractor(self.config, self.logger)
            self.logger.log_operation("组件加载", "符号提取组件已加载")
        except Exception as exc:
            self.logger.log_warning(f"符号提取组件加载失败: {exc}")

    def analyze(self, project_path: str, options: AnalysisOptions = None) -> AnalysisResult:
        """
        Analyze a project end to end.

        Pipeline: cache check → platform detection → file scan → symbol
        extraction → per-file info → dependency analysis → statistics →
        cache save.

        Args:
            project_path: Root path of the project to analyze.
            options: Analysis options; defaults to ``AnalysisOptions()``.

        Returns:
            AnalysisResult: Aggregated analysis result.
        """
        options = options or AnalysisOptions()
        start_time = time.time()

        self.logger.log_operation("项目分析", f"开始分析: {project_path}")

        # Fresh result; may be swapped for the cached one below.
        result = AnalysisResult()

        # 1. Cache check. With incremental mode and no detected changes,
        #    the cached result is returned as-is; otherwise the cached
        #    result becomes the base that the steps below overwrite.
        if options.use_cache:
            cached = self.get_cached_result(project_path)
            if cached and options.incremental:
                # Incremental analysis path.
                if 'incremental' in self.components:
                    changes = self.components['incremental'].detect_changes(project_path)
                    if not changes:
                        self.logger.log_operation("增量分析", "没有检测到变化，使用缓存")
                        return cached
                    else:
                        self.logger.log_operation("增量分析", f"检测到 {len(changes)} 个文件变化")
                        result = cached  # incrementally update on top of the cached result

        # 2. Platform detection: raw detector string is kept in metadata,
        #    the mapped Platform enum goes on the result.
        if options.detect_platform and 'platform' in self.components:
            platform_str = self.components['platform'].detect_platform(project_path)
            result.metadata['platform_raw'] = platform_str
            platform = self.detect_platform(project_path, platform_override=platform_str)
            result.platform = platform
            self.logger.log_operation("平台检测", f"检测到平台: {platform.value}")

        # 3. File scan.
        files = self._scan_project_files(project_path)
        self.logger.log_operation("文件扫描", f"发现 {len(files)} 个文件")

        # 4. Symbol extraction: AST-based when deep analysis is requested
        #    (and the symbol component loaded), otherwise regex-based.
        if options.deep_analysis and 'symbol' in self.components:
            self.logger.log_operation("符号提取", "使用深度分析提取符号")
            symbols = self._extract_symbols_deep(files)
            result.symbols = symbols
        else:
            # Basic (regex) symbol extraction.
            symbols = self._extract_symbols_basic(files)
            result.symbols = symbols

        # 4.1 Detect each file's language once here; reused below as the
        #     language_hint for analyze_file to avoid re-detection.
        language_map: Dict[str, str] = {}
        file_groups: Dict[str, List[str]] = {}
        for file_path in files:
            lang = self._detect_language(file_path)
            language_map[file_path] = lang
            file_groups.setdefault(lang, []).append(file_path)

        result.metadata['file_groups'] = file_groups
        result.metadata['languages'] = language_map

        # 5. Per-file information.
        for file_path in files:
            file_info = self.analyze_file(file_path, options, language_hint=language_map.get(file_path))
            result.files[file_path] = file_info

        # 6. Dependency analysis.
        if options.analyze_dependencies and 'dependency' in self.components:
            self.logger.log_operation("依赖分析", "开始分析文件依赖关系")
            dep_info = self.analyze_dependencies(files, result.symbols)
            result.dependencies = dep_info

            if dep_info.circular_dependencies:
                self.logger.log_warning(
                    f"发现 {len(dep_info.circular_dependencies)} 个循环依赖"
                )

        # 7. Summary statistics.
        result.statistics = {
            'total_files': len(files),
            'total_symbols': result.get_total_symbols(),
            'languages': self._get_language_statistics(result),
            'analysis_time': time.time() - start_time
        }

        # 8. Persist to cache (memory + on-disk JSON).
        if options.use_cache:
            self.save_cache(project_path, result)

        self.logger.log_operation(
            "分析完成",
            f"耗时: {result.statistics['analysis_time']:.2f}秒"
        )

        return result

    def analyze_file(self, file_path: str, options: AnalysisOptions = None,
                     language_hint: str = None) -> FileInfo:
        """
        Analyze a single file: size/mtime metadata plus best-effort symbol
        and dependency extraction.

        Args:
            file_path: Path of the file to analyze.
            options: Analysis options; defaults to ``AnalysisOptions()``.
            language_hint: Pre-detected language; skips re-detection.

        Returns:
            FileInfo: Collected information for the file.
        """
        options = options or AnalysisOptions()

        # FIX: the original called exists() plus stat() twice (three
        # syscalls and a check-then-use race). Stat once and treat a
        # vanished/unreadable file as size 0 / mtime 0, matching the
        # original's behavior for a missing file.
        try:
            st = Path(file_path).stat()
            size, last_modified = st.st_size, st.st_mtime
        except OSError:
            size, last_modified = 0, 0

        language = language_hint or self._detect_language(file_path)
        file_info = FileInfo(
            path=file_path,
            language=language,
            size=size,
            last_modified=last_modified
        )

        # Symbol count via the symbol component (best-effort: failures
        # leave the default count and only log a warning).
        if 'symbol' in self.components:
            try:
                result = self.components['symbol'].extract_symbols(file_path)
                file_info.symbols_count = len(result.get_all_symbol_names())
            except Exception as e:
                self.logger.log_warning(f"符号提取失败 {file_path}: {e}")

        # Import/dependency list via the dependency component (best-effort).
        if options.analyze_dependencies and 'dependency' in self.components:
            try:
                deps = self.components['dependency'].extract_imports(file_path)
                file_info.dependencies = deps
            except Exception as e:
                self.logger.log_warning(f"依赖提取失败 {file_path}: {e}")

        return file_info

    def detect_platform(self, project_path: str, platform_override: str = None) -> Platform:
        """Map the detector's platform string onto the Platform enum.

        Args:
            project_path: Project root, used when no override is given.
            platform_override: Pre-detected platform string; when falsy the
                platform component is asked to detect it.

        Returns:
            Platform: Mapped enum value; UNKNOWN when the platform
            component is missing or the string is unrecognized.
        """
        if 'platform' not in self.components:
            return Platform.UNKNOWN

        raw = platform_override
        if not raw:
            raw = self.components['platform'].detect_platform(project_path)

        mapping = {
            'ios': Platform.IOS,
            'android': Platform.ANDROID,
            'unified': Platform.UNIFIED,
            'mixed': Platform.MIXED,
            'react-native': Platform.REACT_NATIVE,
            'flutter': Platform.FLUTTER,
            'ionic': Platform.IONIC,
            'cordova': Platform.CORDOVA,
            'xamarin': Platform.XAMARIN,
            'nativescript': Platform.NATIVESCRIPT
        }
        return mapping.get(raw, Platform.UNKNOWN)

    def analyze_dependencies(self, files: List[str], symbols: Dict = None) -> DependencyInfo:
        """
        Analyze inter-file dependency relationships.

        Args:
            files: Files to analyze.
            symbols: Optional symbol table forwarded to the dependency
                analyzer (newer signature only).

        Returns:
            DependencyInfo: Dependency graph, circular dependencies and
            dependency layers; all empty when the component is unavailable.
        """
        if 'dependency' not in self.components:
            return DependencyInfo(
                dependencies={},
                circular_dependencies=[],
                dependency_layers=[]
            )

        analyzer = self.components['dependency']

        # The analyzer expects files grouped by detected language.
        file_groups: Dict[str, List[str]] = {}
        for file_path in files:
            file_groups.setdefault(self._detect_language(file_path), []).append(file_path)

        try:
            dependencies = analyzer.analyze_dependencies(file_groups, symbols or {})
        except TypeError:
            # Older analyzer signature: no symbols parameter, flat file list.
            try:
                dependencies = analyzer.analyze_dependencies(files)
            except Exception:
                # FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception
                # while keeping the best-effort empty fallback.
                dependencies = {}

        # The dependency analyzer maintains its own graph, so cycles and
        # layers are queried after the analysis pass above.
        cycles = analyzer.get_circular_dependencies()
        layers = analyzer.get_dependency_layers()

        return DependencyInfo(
            dependencies=dependencies,
            circular_dependencies=cycles,
            dependency_layers=layers
        )

    def get_cached_result(self, project_path: str) -> Optional[AnalysisResult]:
        """
        Load a previously saved analysis result.

        Checks the in-memory cache first, then the on-disk JSON cache
        written by save_cache().

        Args:
            project_path: Project whose cached result is wanted.

        Returns:
            The cached AnalysisResult, or None when no usable cache exists
            or deserialization fails.
        """
        cache_key = f"analysis_{project_path}"
        if cache_key in self.cache:
            return self.cache[cache_key]

        # Fall back to the on-disk cache.
        cache_file = Path(project_path) / '.analysis_cache' / 'result.json'
        if not cache_file.exists():
            return None

        # FIX: removed the redundant function-local re-import of FileInfo/
        # DependencyInfo/Platform (already imported at module top) and use
        # the module-level `import json`.
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                data = json.load(f)

            result = AnalysisResult()

            # Unknown or legacy platform strings degrade to UNKNOWN.
            try:
                result.platform = Platform(data.get('platform', Platform.UNKNOWN.value))
            except ValueError:
                result.platform = Platform.UNKNOWN

            # Rebuild FileInfo objects from their serialized dict form,
            # defaulting each field for forward/backward compatibility.
            for file_path, info in data.get('files', {}).items():
                result.files[file_path] = FileInfo(
                    path=file_path,
                    language=info.get('language', 'unknown'),
                    size=info.get('size', 0),
                    last_modified=info.get('last_modified', 0.0),
                    symbols_count=info.get('symbols_count', 0),
                    dependencies=info.get('dependencies', [])
                )

            result.symbols = data.get('symbols', {})

            if 'dependencies' in data:
                dep_data = data['dependencies']
                result.dependencies = DependencyInfo(
                    dependencies=dep_data.get('dependencies', {}),
                    circular_dependencies=dep_data.get('circular_dependencies', []),
                    dependency_layers=dep_data.get('dependency_layers', [])
                )

            result.statistics = data.get('statistics', {})
            return result
        except Exception as e:
            self.logger.log_warning(f"缓存加载失败: {e}")

        return None

    def save_cache(self, project_path: str, result: AnalysisResult):
        """Persist an analysis result to both cache layers.

        Writes the in-memory cache unconditionally, then a JSON snapshot to
        ``<project>/.analysis_cache/result.json``; serialization failures
        only produce a warning.
        """
        # In-memory layer.
        self.cache[f"analysis_{project_path}"] = result

        # On-disk layer.
        cache_dir = Path(project_path) / '.analysis_cache'
        cache_dir.mkdir(parents=True, exist_ok=True)
        cache_file = cache_dir / 'result.json'

        try:
            import json
            # StateJSONEncoder handles non-JSON-native types (e.g. sets)
            # that appear in the analysis state.
            from .incremental_analyzer import StateJSONEncoder

            payload = {
                'platform': result.platform.value if result.platform else 'unknown',
                'statistics': result.statistics,
                'timestamp': time.time(),
                'files': {},
                'symbols': result.symbols
            }
            for path, info in result.files.items():
                payload['files'][path] = {
                    'language': info.language,
                    'size': info.size,
                    'last_modified': info.last_modified,
                    'symbols_count': info.symbols_count,
                    'dependencies': info.dependencies
                }
            if result.dependencies:
                payload['dependencies'] = {
                    'dependencies': result.dependencies.dependencies,
                    'circular_dependencies': result.dependencies.circular_dependencies,
                    'dependency_layers': result.dependencies.dependency_layers
                }

            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(payload, f, cls=StateJSONEncoder, indent=2)
        except Exception as e:
            self.logger.log_warning(f"缓存保存失败: {e}")

    def _scan_project_files(self, project_path: str) -> List[str]:
        """Walk the project tree and collect files with supported extensions.

        Hidden directories and common build/output folders are pruned so
        os.walk never descends into them.
        """
        matched = []
        wanted = set(SUPPORTED_EXTENSIONS)
        pruned = {'node_modules', 'build', 'dist', 'target'}

        for root, dirs, names in os.walk(project_path):
            # Mutate dirs in place so the pruning takes effect.
            dirs[:] = [
                d for d in dirs
                if not d.startswith('.') and d not in pruned
            ]
            matched.extend(
                os.path.join(root, name)
                for name in names
                if Path(name).suffix.lower() in wanted
            )

        return matched

    def _detect_language(self, file_path: str) -> str:
        """Return the file's detected language, falling back to 'unknown'."""
        detected = detect_language_for_file(file_path)
        return detected if detected else 'unknown'

    def _extract_symbols_deep(self, files: List[str]) -> Dict:
        """AST-based symbol extraction, converted to the legacy dict layout.

        Returns:
            ``{symbol_type: {name: {'file', 'line', 'modifiers'}}}``; empty
            when the symbol component is unavailable. As before, duplicate
            names within a type keep the last occurrence.
        """
        if 'symbol' not in self.components:
            return {}

        batch = self.components['symbol'].extract_symbols_batch(files, method='ast')

        return {
            sym_type: {
                info.name: {
                    'file': info.file,
                    'line': info.line,
                    'modifiers': info.modifiers
                }
                for info in infos
            }
            for sym_type, infos in batch.symbols.items()
        }

    def _extract_symbols_basic(self, files: List[str]) -> Dict:
        """Regex-based symbol extraction, converted to the legacy dict layout.

        Returns:
            ``{symbol_type: {name: {'file'}}}``; empty when the symbol
            component is unavailable. Duplicate names within a type keep
            the last occurrence, as before.
        """
        if 'symbol' not in self.components:
            return {}

        batch = self.components['symbol'].extract_symbols_batch(files, method='regex')

        return {
            sym_type: {info.name: {'file': info.file} for info in infos}
            for sym_type, infos in batch.symbols.items()
        }

    def _get_language_statistics(self, result: AnalysisResult) -> Dict:
        """Aggregate per-language file counts and total byte sizes."""
        stats = {}
        for info in result.files.values():
            entry = stats.setdefault(info.language, {'count': 0, 'size': 0})
            entry['count'] += 1
            entry['size'] += info.size
        return stats
