"""
代码分析工具模块

提供代码质量分析、依赖分析、代码度量等功能。
"""

import ast
import os
import re
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Set

from .base import FileTool, ToolResult, ToolError


class CodeQualityTool(FileTool):
    """Code quality analysis tool.

    Runs a configurable set of checks (complexity, duplicates, style,
    security, performance) against a single file or against every matching
    file under a directory, and reports per-check issues and scores.
    """

    def __init__(self):
        super().__init__(
            name="code_quality",
            description="分析代码质量",
            version="1.0.0"
        )
        # File extensions this tool is willing to analyse.
        self.supported_extensions = ['.py', '.js', '.ts', '.java', '.cpp', '.c', '.cs', '.php', '.rb', '.go', '.rs']

    def get_parameters(self) -> Dict[str, Dict[str, Any]]:
        """Return the parameter schema consumed by ``validate_parameters``."""
        return {
            'file': {
                'type': str,
                'required': False,
                'description': '要分析的文件路径'
            },
            'directory': {
                'type': str,
                'required': False,
                'default': '.',
                'description': '要分析的目录路径'
            },
            'file_types': {
                'type': list,
                'required': False,
                'default': ['.py'],
                'description': '要分析的文件类型'
            },
            'checks': {
                'type': list,
                'required': False,
                'default': ['complexity', 'duplicates', 'style', 'security'],
                'choices': ['complexity', 'duplicates', 'style', 'security', 'performance'],
                'description': '要执行的检查类型'
            }
        }

    def execute(self, **kwargs) -> ToolResult:
        """Run the code quality analysis.

        If ``file`` is given, that single file is analysed; otherwise every
        file under ``directory`` whose suffix is in ``file_types`` is
        analysed.  Any exception is converted into a failed ToolResult.
        """
        start_time = datetime.now()

        try:
            if not self.validate_parameters(**kwargs):
                return self._create_result(False, error="参数验证失败")

            file_path = kwargs.get('file')
            directory = Path(kwargs.get('directory', '.'))
            file_types = kwargs.get('file_types', ['.py'])
            checks = kwargs.get('checks', ['complexity', 'duplicates', 'style', 'security'])

            if file_path:
                # Single-file analysis.
                results = self._analyze_file(Path(file_path), checks)
            else:
                # Recursive directory analysis.
                results = self._analyze_directory(directory, file_types, checks)

            execution_time = (datetime.now() - start_time).total_seconds()

            return self._create_result(
                success=True,
                data=results,
                message="代码质量分析完成",
                execution_time=execution_time
            )

        except Exception as e:
            execution_time = (datetime.now() - start_time).total_seconds()
            return self._create_result(
                success=False,
                error=str(e),
                execution_time=execution_time
            )

    def _analyze_file(self, file_path: Path, checks: List[str]) -> Dict[str, Any]:
        """Analyse one file, running each requested check.

        Raises ToolError when the file does not exist; failures inside the
        checks themselves are captured in the result's ``error`` key.
        """
        if not file_path.exists() or not file_path.is_file():
            raise ToolError(f"文件不存在: {file_path}", self.name)

        results: Dict[str, Any] = {
            'file': str(file_path),
            'analysis': {}
        }

        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()

            for check in checks:
                if check == 'complexity':
                    results['analysis']['complexity'] = self._analyze_complexity(content, file_path.suffix)
                elif check == 'duplicates':
                    # BUG FIX: 'duplicates' is in the default checks list but
                    # previously had no handler and was silently ignored.
                    results['analysis']['duplicates'] = self._analyze_duplicates(content, file_path.suffix)
                elif check == 'style':
                    results['analysis']['style'] = self._analyze_style(content, file_path.suffix)
                elif check == 'security':
                    results['analysis']['security'] = self._analyze_security(content, file_path.suffix)
                elif check == 'performance':
                    results['analysis']['performance'] = self._analyze_performance(content, file_path.suffix)

        except Exception as e:
            results['error'] = str(e)

        return results

    def _analyze_directory(self, directory: Path, file_types: List[str], checks: List[str]) -> Dict[str, Any]:
        """Analyse every matching file under ``directory`` (recursively)."""
        results: Dict[str, Any] = {
            'directory': str(directory),
            'files': [],
            'summary': {
                'total_files': 0,
                'total_issues': 0,
                'issues_by_type': {}
            }
        }

        summary = results['summary']
        for file_path in directory.rglob('*'):
            if file_path.is_file() and file_path.suffix in file_types:
                file_analysis = self._analyze_file(file_path, checks)
                results['files'].append(file_analysis)
                summary['total_files'] += 1

                # BUG FIX: total_issues / issues_by_type were initialised but
                # never updated; fold each check's issue count into them.
                for check_name, check_result in file_analysis.get('analysis', {}).items():
                    if not isinstance(check_result, dict):
                        continue
                    count = check_result.get('total_issues', 0)
                    if count:
                        summary['total_issues'] += count
                        summary['issues_by_type'][check_name] = \
                            summary['issues_by_type'].get(check_name, 0) + count

        return results

    def _analyze_complexity(self, content: str, file_extension: str) -> Dict[str, Any]:
        """Dispatch complexity analysis by file type (Python only for now)."""
        if file_extension == '.py':
            return self._analyze_python_complexity(content)
        else:
            return {'complexity': 'N/A', 'reason': f'不支持的文件类型: {file_extension}'}

    def _analyze_python_complexity(self, content: str) -> Dict[str, Any]:
        """Compute a whole-file cyclomatic complexity estimate for Python.

        Counts branching nodes (if/while/for/except and boolean operators)
        on top of a base complexity of 1, plus function/class counts.
        """
        try:
            tree = ast.parse(content)

            complexity = 1  # base complexity of a straight-line module

            for node in ast.walk(tree):
                if isinstance(node, (ast.If, ast.While, ast.For, ast.AsyncFor)):
                    complexity += 1
                elif isinstance(node, ast.ExceptHandler):
                    complexity += 1
                elif isinstance(node, ast.BoolOp):
                    # Each extra operand of and/or adds one decision point.
                    complexity += len(node.values) - 1

            # BUG FIX: count async functions too, not just plain `def`.
            functions = len([
                n for n in ast.walk(tree)
                if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef))
            ])
            classes = len([n for n in ast.walk(tree) if isinstance(n, ast.ClassDef)])

            return {
                'cyclomatic_complexity': complexity,
                'functions_count': functions,
                'classes_count': classes,
                'lines_of_code': len(content.splitlines()),
                'complexity_level': 'high' if complexity > 10 else 'medium' if complexity > 5 else 'low'
            }
        except SyntaxError as e:
            return {'error': f'语法错误: {e}'}

    def _analyze_duplicates(self, content: str, file_extension: str) -> Dict[str, Any]:
        """Detect non-trivial source lines that appear more than once."""
        counts: Dict[str, int] = {}
        for line in content.splitlines():
            stripped = line.strip()
            # Ignore short/boilerplate lines and comments to reduce noise.
            if len(stripped) < 10 or stripped.startswith(('#', '//')):
                continue
            counts[stripped] = counts.get(stripped, 0) + 1

        issues = [
            {
                'type': 'duplicate_line',
                'message': f'重复代码行: {line[:60]}',
                'count': count
            }
            for line, count in counts.items() if count > 1
        ]

        return {
            'issues': issues,
            'total_issues': len(issues),
            'duplicate_score': max(0, 100 - len(issues) * 5)
        }

    def _analyze_style(self, content: str, file_extension: str) -> Dict[str, Any]:
        """Run lightweight style checks: line length, blank lines, indentation."""
        issues = []
        lines = content.splitlines()

        # Lines longer than 88 characters (black's default limit).
        long_lines = [i+1 for i, line in enumerate(lines) if len(line) > 88]
        if long_lines:
            issues.append({
                'type': 'line_length',
                'message': f'行长度超过88字符: {long_lines[:5]}',
                'count': len(long_lines)
            })

        # Excessive proportion of blank lines (> 30% of the file).
        empty_lines = [i+1 for i, line in enumerate(lines) if line.strip() == '']
        if len(empty_lines) > len(lines) * 0.3:
            issues.append({
                'type': 'too_many_empty_lines',
                'message': f'空行过多: {len(empty_lines)}/{len(lines)}',
                'count': len(empty_lines)
            })

        # Indented lines that mix a leading space with tabs anywhere in the
        # line.  NOTE(review): this heuristic also fires on tabs inside
        # string literals — confirm whether that is acceptable noise.
        inconsistent_indent = []
        for i, line in enumerate(lines):
            if line.strip() and not line.startswith((' ', '\t')):
                continue
            if line.startswith(' ') and '\t' in line:
                inconsistent_indent.append(i+1)

        if inconsistent_indent:
            issues.append({
                'type': 'inconsistent_indent',
                'message': f'缩进不一致: {inconsistent_indent[:5]}',
                'count': len(inconsistent_indent)
            })

        return {
            'issues': issues,
            'total_issues': len(issues),
            'style_score': max(0, 100 - len(issues) * 10)
        }

    def _analyze_security(self, content: str, file_extension: str) -> Dict[str, Any]:
        """Flag potentially dangerous calls and hard-coded credentials."""
        issues = []

        # Patterns for dangerous function usage.
        dangerous_patterns = {
            'eval': r'\beval\s*\(',
            'exec': r'\bexec\s*\(',
            'subprocess': r'subprocess\.(call|run|Popen)',
            'os.system': r'os\.system\s*\(',
            'shell=True': r'shell\s*=\s*True'
        }

        for pattern_name, pattern in dangerous_patterns.items():
            matches = re.findall(pattern, content, re.IGNORECASE)
            if matches:
                issues.append({
                    'type': 'security_risk',
                    'pattern': pattern_name,
                    'message': f'发现潜在安全风险: {pattern_name}',
                    'count': len(matches)
                })

        # Hard-coded password assignments.
        password_patterns = [
            r'password\s*=\s*["\'][^"\']+["\']',
            r'passwd\s*=\s*["\'][^"\']+["\']',
            r'pwd\s*=\s*["\'][^"\']+["\']'
        ]

        for pattern in password_patterns:
            matches = re.findall(pattern, content, re.IGNORECASE)
            if matches:
                issues.append({
                    'type': 'hardcoded_password',
                    'message': '发现硬编码密码',
                    'count': len(matches)
                })

        return {
            'issues': issues,
            'total_issues': len(issues),
            'security_score': max(0, 100 - len(issues) * 20)
        }

    def _analyze_performance(self, content: str, file_extension: str) -> Dict[str, Any]:
        """Very coarse performance heuristics (Python only)."""
        issues = []

        if file_extension == '.py':
            # Heuristic only: flags files that contain both loops and
            # function definitions as possibly calling functions in loops.
            if 'for' in content and 'def ' in content:
                issues.append({
                    'type': 'potential_performance_issue',
                    'message': '可能存在循环中的函数调用',
                    'count': 1
                })

        return {
            'issues': issues,
            'total_issues': len(issues),
            'performance_score': max(0, 100 - len(issues) * 15)
        }


class DependencyAnalyzer(FileTool):
    """Project dependency analysis tool.

    Discovers Python and Node.js dependency manifests under a project
    directory and returns the declared dependencies plus a summary.
    """

    def __init__(self):
        super().__init__(
            name="dependency_analyzer",
            description="分析项目依赖",
            version="1.0.0"
        )

    def get_parameters(self) -> Dict[str, Dict[str, Any]]:
        """Return the parameter schema consumed by ``validate_parameters``."""
        return {
            'directory': {
                'type': str,
                'required': False,
                'default': '.',
                'description': '项目目录'
            },
            'include_dev': {
                'type': bool,
                'required': False,
                'default': False,
                'description': '是否包含开发依赖'
            },
            'check_updates': {
                'type': bool,
                'required': False,
                'default': False,
                'description': '检查依赖更新'
            }
        }

    def execute(self, **kwargs) -> ToolResult:
        """Run the dependency analysis and wrap the result in a ToolResult."""
        start_time = datetime.now()

        try:
            if not self.validate_parameters(**kwargs):
                return self._create_result(False, error="参数验证失败")

            directory = Path(kwargs.get('directory', '.'))
            include_dev = kwargs.get('include_dev', False)
            # NOTE(review): check_updates is accepted but not implemented yet.
            check_updates = kwargs.get('check_updates', False)

            dependencies = self._analyze_dependencies(directory, include_dev)

            execution_time = (datetime.now() - start_time).total_seconds()

            return self._create_result(
                success=True,
                data=dependencies,
                message="依赖分析完成",
                execution_time=execution_time
            )

        except Exception as e:
            execution_time = (datetime.now() - start_time).total_seconds()
            return self._create_result(
                success=False,
                error=str(e),
                execution_time=execution_time
            )

    def _analyze_dependencies(self, directory: Path, include_dev: bool) -> Dict[str, Any]:
        """Analyse per-language dependencies and compute the summary."""
        dependencies: Dict[str, Any] = {
            'python': self._analyze_python_dependencies(directory, include_dev),
            'node': self._analyze_node_dependencies(directory, include_dev),
            'summary': {}
        }

        # Aggregate counts across languages (the 'summary' entry has no
        # 'dependencies' key and is naturally skipped).
        total_deps = 0
        for lang, deps in dependencies.items():
            if isinstance(deps, dict) and 'dependencies' in deps:
                total_deps += len(deps['dependencies'])

        dependencies['summary'] = {
            'total_dependencies': total_deps,
            'languages': [lang for lang, deps in dependencies.items()
                         if isinstance(deps, dict) and 'dependencies' in deps]
        }

        return dependencies

    def _analyze_python_dependencies(self, directory: Path, include_dev: bool) -> Dict[str, Any]:
        """Collect Python dependencies from common manifest files.

        BUG FIX: previously pyproject.toml / setup.py / Pipfile were fed
        through the line-oriented requirements parser (producing garbage
        entries), and requirements-dev.txt was parsed even when
        include_dev=False.  Now only requirements-format (.txt) files are
        parsed; other manifests are merely reported as found.
        """
        requirements_files = [
            'requirements.txt',
            'requirements-dev.txt',
            'pyproject.toml',
            'setup.py',
            'Pipfile'
        ]

        dependencies: List[Dict[str, Any]] = []
        found_files: List[str] = []

        for req_file in requirements_files:
            file_path = directory / req_file
            if not file_path.exists():
                continue
            found_files.append(req_file)
            # Only requirements-format files can be parsed line-by-line.
            if not req_file.endswith('.txt'):
                continue
            # Respect the include_dev flag for the dev requirements file.
            if req_file == 'requirements-dev.txt' and not include_dev:
                continue
            dependencies.extend(self._parse_requirements_file(file_path))

        return {
            'dependencies': dependencies,
            'files_found': found_files,
            'total_count': len(dependencies)
        }

    def _parse_requirements_file(self, file_path: Path) -> List[Dict[str, Any]]:
        """Parse one requirements-format file into dependency records.

        Handles inline comments, pip option lines (-r / -e / --...), and
        the common version constraint operators.  Parse failures are logged
        as warnings rather than raised.
        """
        dependencies: List[Dict[str, Any]] = []

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                for raw_line in f:
                    # BUG FIX: strip inline comments before parsing so
                    # `pkg==1.0  # note` does not leak the comment into
                    # the version field.
                    line = raw_line.split('#', 1)[0].strip()
                    if not line or line.startswith('-'):
                        # Skip blanks, comments, and pip options
                        # (-r other.txt, -e ., --index-url ...).
                        continue

                    # Ordered so two-char operators win over their one-char
                    # prefixes ('>=' before '>', '<=' before '<').
                    for op in ('==', '>=', '<=', '~=', '!=', '>', '<'):
                        if op in line:
                            name, version = line.split(op, 1)
                            dependencies.append({
                                'name': name.strip(),
                                'version': version.strip(),
                                'constraint': op
                            })
                            break
                    else:
                        # Bare package name with no version constraint.
                        dependencies.append({
                            'name': line,
                            'version': 'latest',
                            'constraint': 'none'
                        })
        except Exception as e:
            self._log_warning(f"无法解析文件 {file_path}: {e}")

        return dependencies

    def _analyze_node_dependencies(self, directory: Path, include_dev: bool) -> Dict[str, Any]:
        """Collect Node.js dependencies from package.json, if present."""
        package_json = directory / 'package.json'

        if not package_json.exists():
            return {'dependencies': [], 'files_found': [], 'total_count': 0}

        try:
            import json
            with open(package_json, 'r', encoding='utf-8') as f:
                data = json.load(f)

            dependencies = []

            # Production dependencies.
            if 'dependencies' in data:
                for name, version in data['dependencies'].items():
                    dependencies.append({
                        'name': name,
                        'version': version,
                        'type': 'production'
                    })

            # Development dependencies (opt-in).
            if include_dev and 'devDependencies' in data:
                for name, version in data['devDependencies'].items():
                    dependencies.append({
                        'name': name,
                        'version': version,
                        'type': 'development'
                    })

            return {
                'dependencies': dependencies,
                'files_found': ['package.json'],
                'total_count': len(dependencies)
            }
        except Exception as e:
            return {'error': f'无法解析 package.json: {e}'}


class CodeMetricsTool(FileTool):
    """Code metrics tool.

    Counts total/code/comment/blank lines per file and aggregates the
    results per language across a directory tree.
    """

    def __init__(self):
        super().__init__(
            name="code_metrics",
            description="计算代码度量指标",
            version="1.0.0"
        )
        # File extensions this tool is willing to analyse.
        self.supported_extensions = ['.py', '.js', '.ts', '.java', '.cpp', '.c', '.cs', '.php', '.rb', '.go', '.rs']

    def get_parameters(self) -> Dict[str, Dict[str, Any]]:
        """Return the parameter schema consumed by ``validate_parameters``."""
        return {
            'directory': {
                'type': str,
                'required': False,
                'default': '.',
                'description': '要分析的目录'
            },
            'file_types': {
                'type': list,
                'required': False,
                'default': ['.py', '.js', '.ts', '.java', '.cpp', '.c', '.cs', '.php', '.rb', '.go', '.rs'],
                'description': '要分析的文件类型'
            },
            'include_tests': {
                'type': bool,
                'required': False,
                'default': True,
                'description': '是否包含测试文件'
            }
        }

    def execute(self, **kwargs) -> ToolResult:
        """Compute the metrics and wrap the result in a ToolResult."""
        start_time = datetime.now()

        try:
            if not self.validate_parameters(**kwargs):
                return self._create_result(False, error="参数验证失败")

            directory = Path(kwargs.get('directory', '.'))
            file_types = kwargs.get('file_types', ['.py', '.js', '.ts', '.java', '.cpp', '.c', '.cs', '.php', '.rb', '.go', '.rs'])
            include_tests = kwargs.get('include_tests', True)

            metrics = self._calculate_metrics(directory, file_types, include_tests)

            execution_time = (datetime.now() - start_time).total_seconds()

            return self._create_result(
                success=True,
                data=metrics,
                message="代码度量计算完成",
                execution_time=execution_time
            )

        except Exception as e:
            execution_time = (datetime.now() - start_time).total_seconds()
            return self._create_result(
                success=False,
                error=str(e),
                execution_time=execution_time
            )

    def _calculate_metrics(self, directory: Path, file_types: List[str], include_tests: bool) -> Dict[str, Any]:
        """Walk ``directory`` recursively and aggregate per-file metrics."""
        metrics: Dict[str, Any] = {
            'files': [],
            'summary': {
                'total_files': 0,
                'total_lines': 0,
                'total_code_lines': 0,
                'total_comment_lines': 0,
                'total_blank_lines': 0,
                'languages': {}
            }
        }

        for file_path in directory.rglob('*'):
            if not file_path.is_file() or file_path.suffix not in file_types:
                continue

            # Skip test files when requested.
            if not include_tests and self._is_test_file(file_path):
                continue

            file_metrics = self._calculate_file_metrics(file_path)
            metrics['files'].append(file_metrics)

            # BUG FIX: unreadable files produce an {'file', 'error'} record
            # with no line counts; folding them into the summary raised
            # KeyError and aborted the whole run.  Record the error entry
            # but exclude it from the aggregates.
            if 'error' in file_metrics:
                continue

            summary = metrics['summary']
            summary['total_files'] += 1
            summary['total_lines'] += file_metrics['total_lines']
            summary['total_code_lines'] += file_metrics['code_lines']
            summary['total_comment_lines'] += file_metrics['comment_lines']
            summary['total_blank_lines'] += file_metrics['blank_lines']

            # Per-language aggregation keyed on the extension sans dot.
            lang = file_path.suffix[1:] if file_path.suffix else 'unknown'
            if lang not in summary['languages']:
                summary['languages'][lang] = {
                    'files': 0,
                    'lines': 0,
                    'code_lines': 0
                }

            summary['languages'][lang]['files'] += 1
            summary['languages'][lang]['lines'] += file_metrics['total_lines']
            summary['languages'][lang]['code_lines'] += file_metrics['code_lines']

        return metrics

    def _is_test_file(self, file_path: Path) -> bool:
        """Heuristically decide whether ``file_path`` names a test file."""
        test_patterns = ['test_', '_test', '.test.', 'spec.', '.spec']
        return any(pattern in file_path.name for pattern in test_patterns)

    def _calculate_file_metrics(self, file_path: Path) -> Dict[str, Any]:
        """Count total/code/comment/blank lines for one file.

        Comment detection is line-based and only recognises lines that
        START with '#', '//' or '/*'; continuation lines of block comments
        are counted as code.  On read failure an error record is returned
        instead of raising.
        """
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                lines = f.readlines()

            total_lines = len(lines)
            code_lines = 0
            comment_lines = 0
            blank_lines = 0

            for line in lines:
                stripped = line.strip()
                if not stripped:
                    blank_lines += 1
                elif stripped.startswith('#') or stripped.startswith('//') or stripped.startswith('/*'):
                    comment_lines += 1
                else:
                    code_lines += 1

            return {
                'file': str(file_path),
                'total_lines': total_lines,
                'code_lines': code_lines,
                'comment_lines': comment_lines,
                'blank_lines': blank_lines,
                # Guard against division by zero for empty files.
                'comment_ratio': comment_lines / total_lines if total_lines > 0 else 0
            }
        except Exception as e:
            return {
                'file': str(file_path),
                'error': str(e)
            }