#!/usr/bin/env python3
"""
CodeQL Automated Scanning Pipeline
Used to scan projects in datasets-lang directory and generate security analysis reports
"""

import os
import sys
import json
import yaml
import shutil
import subprocess
import argparse
import logging
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Optional, Tuple
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading
import time


class CodeQLPipeline:
    """CodeQL Automated Scanning Pipeline"""
    
    def __init__(self, config_path: str = None):
        """Set up configuration, logging, output location and bookkeeping state.

        Args:
            config_path: Optional path to a YAML config file; when None the
                defaults from _load_config() are used.
        """
        self.config = self._load_config(config_path)
        self.setup_logging()

        # Make sure the results directory exists before any scan output lands.
        self.results_dir = Path(self.config['output']['results_dir'])
        self.results_dir.mkdir(parents=True, exist_ok=True)

        # Dataset language name -> CodeQL extractor identifier.  Several
        # languages piggyback on another language's extractor.
        extractor_pairs = [
            ('Python', 'python'),
            ('TypeScript', 'javascript'),  # TypeScript uses javascript extractor
            ('JavaScript', 'javascript'),
            ('Go', 'go'),
            ('Rust', 'rust'),
            ('Java', 'java'),
            ('C#', 'csharp'),
            ('C++', 'cpp'),
            ('C', 'cpp'),
            ('PHP', 'php'),
            ('Ruby', 'ruby'),
            ('Swift', 'swift'),
            ('Kotlin', 'java'),   # Kotlin uses java extractor
            ('Scala', 'java'),    # Scala uses java extractor
            ('Dart', 'dart'),
            ('R', 'r'),
            ('MATLAB', 'matlab'),
            ('Shell', 'bash'),
            ('PowerShell', 'powershell'),
            ('HTML', 'html'),
            ('CSS', 'css'),
            ('Vue', 'javascript'),     # Vue uses javascript extractor
            ('Svelte', 'javascript'),  # Svelte uses javascript extractor
            ('Elixir', 'elixir'),
            ('Clojure', 'clojure'),
            ('Haskell', 'haskell'),
            ('OCaml', 'ocaml'),
            ('F#', 'fsharp'),
            ('Erlang', 'erlang'),
            ('Lua', 'lua'),
            ('Perl', 'perl'),
            ('Assembly', 'asm'),
            ('Dockerfile', 'dockerfile'),
            ('YAML', 'yaml'),
            ('JSON', 'json'),
            ('XML', 'xml'),
            ('Markdown', 'markdown'),
            ('Text', 'text'),
            ('Config', 'config'),
            ('Other', 'other'),
        ]
        self.language_mapping = dict(extractor_pairs)

        # Run-wide counters, updated as scanning progresses.
        self.stats = dict(
            total_projects=0,
            scanned_projects=0,
            failed_projects=0,
            skipped_projects=0,
            start_time=None,
            end_time=None,
            languages={},
        )
    
    def _load_config(self, config_path: str = None) -> Dict:
        """Load configuration file"""
        if config_path is None:
            config_path = Path(__file__).parent / 'config.yaml'
        
        default_config = {
            'input': {
                'datasets_lang_dir': '/Volumes/拯救者PSSD/mcp_security/datasets-lang',
                'max_projects_per_language': 50,  # Maximum projects per language
                'max_projects_per_batch': 10,     # Maximum projects per batch
            },
            'codeql': {
                'cli_path': '/Users/winegee/Desktop/mcp_security/codeql/codeql',
                'query_suite': 'security-and-quality',
                'timeout': 360000,  # Timeout per project scan (seconds)
                'memory': '4G',   # Memory limit
                'threads': 4,     # Number of threads
            },
            'output': {
                'results_dir': '/Volumes/拯救者PSSD/mcp_security/script/results',
                'reports_dir': '/Volumes/拯救者PSSD/mcp_security/script/reports',
                'databases_dir': '/Volumes/拯救者PSSD/mcp_security/script/databases',
                'generate_sarif': True,
                'generate_html': True,
                'generate_json': True,
            },
            'pipeline': {
                'parallel_workers': 2,  # Number of parallel workers
                'retry_failed': True,
                'max_retries': 2,
                'cleanup_databases': True,
            }
        }
        
        if Path(config_path).exists():
            with open(config_path, 'r', encoding='utf-8') as f:
                user_config = yaml.safe_load(f)
                # Recursively merge configuration
                self._merge_config(default_config, user_config)
        
        return default_config
    
    def _merge_config(self, default: Dict, user: Dict):
        """Recursively merge configuration"""
        for key, value in user.items():
            if key in default and isinstance(default[key], dict) and isinstance(value, dict):
                self._merge_config(default[key], value)
            else:
                default[key] = value
    
    def setup_logging(self):
        """Configure logging to a timestamped file plus stdout.

        Also records the path of a companion error-log file that
        log_detailed_error() appends structured failure records to.
        """
        log_dir = Path(self.config['output']['results_dir']) / 'logs'
        log_dir.mkdir(parents=True, exist_ok=True)
        
        stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        handlers = [
            logging.FileHandler(log_dir / f'codeql_pipeline_{stamp}.log', encoding='utf-8'),
            logging.StreamHandler(sys.stdout),
        ]
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=handlers,
        )
        self.logger = logging.getLogger(__name__)
        # Detailed per-failure records are appended here by log_detailed_error().
        self.error_log_file = log_dir / f'codeql_errors_{stamp}.log'
    
    def log_detailed_error(self, error_type: str, project_name: str, command: List[str], 
                          return_code: int = None, stderr: str = None, stdout: str = None, 
                          exception: Exception = None):
        """Log detailed error information to error log file"""
        try:
            with open(self.error_log_file, 'a', encoding='utf-8') as f:
                f.write(f"\n{'='*80}\n")
                f.write(f"ERROR TYPE: {error_type}\n")
                f.write(f"PROJECT: {project_name}\n")
                f.write(f"TIMESTAMP: {datetime.now().isoformat()}\n")
                f.write(f"COMMAND: {' '.join(command)}\n")
                
                if return_code is not None:
                    f.write(f"RETURN CODE: {return_code}\n")
                
                if stderr:
                    f.write(f"STDERR:\n{stderr}\n")
                
                if stdout:
                    f.write(f"STDOUT:\n{stdout}\n")
                
                if exception:
                    f.write(f"EXCEPTION TYPE: {type(exception).__name__}\n")
                    f.write(f"EXCEPTION MESSAGE: {str(exception)}\n")
                
                f.write(f"{'='*80}\n")
        except Exception as e:
            self.logger.error(f"Failed to write to error log file: {e}")
    
    def _is_valid_codeql_database(self, db_path: Path) -> bool:
        """Check if a CodeQL database exists and is valid"""
        if not db_path.exists():
            return False
        
        # Check if directory is not empty
        if not any(db_path.iterdir()):
            return False
        
        # Use CodeQL to validate the database
        try:
            cmd = [
                self.config['codeql']['cli_path'],
                'database', 'check',
                str(db_path)
            ]
            
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=30
            )
            
            # If command succeeds, database is valid
            return result.returncode == 0
            
        except (subprocess.TimeoutExpired, FileNotFoundError, Exception):
            # If CodeQL check fails, assume database is invalid
            return False
    
    def _cleanup_macos_backup_files(self, db_path: Path):
        """Clean up macOS backup files (._*) from CodeQL database"""
        try:
            if not db_path.exists():
                return
            
            # Recursively find and remove all ._* files
            for file_path in db_path.rglob('._*'):
                try:
                    file_path.unlink()
                    self.logger.debug(f"Removed macOS backup file: {file_path}")
                except Exception as e:
                    self.logger.warning(f"Failed to remove backup file {file_path}: {e}")
                    
        except Exception as e:
            self.logger.warning(f"Failed to cleanup macOS backup files in {db_path}: {e}")
    
    def _sarif_file_exists(self, project_name: str, language: str) -> bool:
        """Check if SARIF file already exists for the project"""
        try:
            results_dir = Path(self.config['output']['results_dir']) / language
            sarif_file = results_dir / f'{project_name}.sarif'
            
            if sarif_file.exists() and sarif_file.stat().st_size > 0:
                self.logger.info(f"SARIF file already exists: {sarif_file}, skipping project {project_name}")
                return True
            
            return False
            
        except Exception as e:
            self.logger.warning(f"Failed to check SARIF file existence for {project_name}: {e}")
            return False
    
    def _get_language_query_suite(self, language: str, base_suite: str) -> str:
        """Get language-specific query suite name"""
        # Map language to CodeQL language and query suite
        language_mapping = {
            'Python': 'python',
            'TypeScript': 'javascript',
            'JavaScript': 'javascript',
            'Go': 'go',
            'Rust': 'rust',
            'Java': 'java',
            'C#': 'csharp',
            'C++': 'cpp',
            'C': 'cpp',
            'PHP': 'php',
            'Ruby': 'ruby',
            'Swift': 'swift',
            'Kotlin': 'java',
            'Scala': 'java',
            'Dart': 'dart',
            'R': 'r',
            'MATLAB': 'matlab',
            'Shell': 'bash',
            'PowerShell': 'powershell',
            'HTML': 'html',
            'CSS': 'css',
            'Vue': 'javascript',
            'Svelte': 'javascript',
            'Elixir': 'elixir',
            'Clojure': 'clojure',
            'Haskell': 'haskell',
            'OCaml': 'ocaml',
            'F#': 'fsharp',
            'Lua': 'lua',
            'Perl': 'perl',
            'XML': 'xml',
            'YAML': 'yaml',
            'JSON': 'json',
            'Markdown': 'markdown',
            'Text': 'text',
            'Config': 'config',
            'Other': 'other'
        }
        
        codeql_language = language_mapping.get(language, 'other')
        
        # Map base suite to language-specific suite
        if base_suite == 'security-and-quality':
            if codeql_language == 'javascript':
                return 'javascript-security-and-quality.qls'
            elif codeql_language == 'python':
                return 'python-security-and-quality.qls'
            elif codeql_language == 'java':
                return 'java-security-and-quality.qls'
            elif codeql_language == 'cpp':
                return 'cpp-security-and-quality.qls'
            elif codeql_language == 'go':
                return 'go-security-and-quality.qls'
            elif codeql_language == 'rust':
                return 'rust-security-and-quality.qls'
            elif codeql_language == 'csharp':
                return 'csharp-security-and-quality.qls'
            # Add more language mappings as needed
            else:
                self.logger.warning(f"No specific query suite found for language {language}, using default")
                return None
        
        return base_suite
    
    def discover_projects(self, target_languages: List[str] = None, max_projects_per_language: int = None) -> Dict[str, List[Path]]:
        """Discover all projects that need to be scanned
        
        Args:
            target_languages: List of specific languages to scan (if None, scan all)
            max_projects_per_language: Maximum number of projects per language (if None, use config default)
        """
        datasets_dir = Path(self.config['input']['datasets_lang_dir'])
        projects_by_language = {}
        
        if not datasets_dir.exists():
            self.logger.error(f"Dataset directory does not exist: {datasets_dir}")
            return projects_by_language
        
        # Use provided max_projects or config default
        if max_projects_per_language is None:
            max_projects_per_language = self.config['input']['max_projects_per_language']
        
        self.logger.info(f"Starting project discovery, directory: {datasets_dir}")
        if target_languages:
            self.logger.info(f"Target languages: {target_languages}")
        self.logger.info(f"Max projects per language: {max_projects_per_language}")
        
        for language_dir in datasets_dir.iterdir():
            if not language_dir.is_dir() or language_dir.name.startswith('.'):
                continue
            
            language = language_dir.name
            
            # Filter by target languages if specified
            if target_languages and language not in target_languages:
                self.logger.debug(f"Skipping language {language} (not in target languages)")
                continue
            
            projects = []
            
            # Traverse all partitions in language directory
            for partition_dir in language_dir.iterdir():
                if not partition_dir.is_dir() or not partition_dir.name.isdigit():
                    continue
                
                # Traverse all projects in partition
                for project_dir in partition_dir.iterdir():
                    if not project_dir.is_dir() or project_dir.name.startswith('.'):
                        continue
                    
                    projects.append(project_dir)
            
            if projects:
                # Note: No longer limiting projects per language due to skip mechanism
                # Projects with existing SARIF files will be automatically skipped
                projects_by_language[language] = projects
                self.logger.info(f"Found {language} language projects: {len(projects)} projects (skip mechanism enabled)")
        
        total_projects = sum(len(projects) for projects in projects_by_language.values())
        self.stats['total_projects'] = total_projects
        self.logger.info(f"Total discovered {total_projects} projects across {len(projects_by_language)} languages")
        
        return projects_by_language
    
    def create_codeql_database(self, project_path: Path, language: str) -> Optional[Path]:
        """Create CodeQL database"""
        try:
            # Determine CodeQL language
            codeql_language = self.language_mapping.get(language, 'other')
            if codeql_language == 'other':
                self.logger.warning(f"Unsupported language: {language}, skipping project {project_path.name}")
                return None
            
            # Create database directory
            db_dir = Path(self.config['output']['databases_dir']) / language / project_path.name
            
            # Check if database already exists and is valid
            if db_dir.exists():
                # Clean up any existing macOS backup files first
                self._cleanup_macos_backup_files(db_dir)
                
                if self._is_valid_codeql_database(db_dir):
                    self.logger.info(f"Valid CodeQL database already exists: {db_dir}, skipping creation")
                    return db_dir
            
            db_dir.mkdir(parents=True, exist_ok=True)
            
            # Build CodeQL command
            cmd = [
                self.config['codeql']['cli_path'],
                'database', 'create',
                str(db_dir),
                '--language', codeql_language,
                '--source-root', str(project_path),
                '--threads', str(self.config['codeql']['threads']),
                '--overwrite'  # Add overwrite flag to handle existing databases
            ]
            
            self.logger.info(f"Creating database: {project_path.name} ({language})")
            
            # Execute command
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=self.config['codeql']['timeout']
            )
            
            if result.returncode == 0:
                self.logger.info(f"Database created successfully: {db_dir}")
                
                # Clean up macOS backup files
                self._cleanup_macos_backup_files(db_dir)
                
                return db_dir
            else:
                self.logger.error(f"Database creation failed: {project_path.name}")
                self.logger.error(f"Return code: {result.returncode}")
                self.logger.error(f"Command: {' '.join(cmd)}")
                self.logger.error(f"Error output: {result.stderr}")
                if result.stdout:
                    self.logger.error(f"Standard output: {result.stdout}")
                
                # Log detailed error to file
                self.log_detailed_error(
                    error_type="Database Creation Failed",
                    project_name=project_path.name,
                    command=cmd,
                    return_code=result.returncode,
                    stderr=result.stderr,
                    stdout=result.stdout
                )
                return None
                
        except subprocess.TimeoutExpired as e:
            self.logger.error(f"Database creation timeout: {project_path.name}")
            self.logger.error(f"Timeout after {self.config['codeql']['timeout']} seconds")
            self.logger.error(f"Command: {' '.join(cmd)}")
            
            # Log detailed error to file
            self.log_detailed_error(
                error_type="Database Creation Timeout",
                project_name=project_path.name,
                command=cmd,
                exception=e
            )
            return None
        except Exception as e:
            self.logger.error(f"Database creation exception: {project_path.name}")
            self.logger.error(f"Exception type: {type(e).__name__}")
            self.logger.error(f"Exception message: {str(e)}")
            self.logger.error(f"Command: {' '.join(cmd)}")
            
            # Log detailed error to file
            self.log_detailed_error(
                error_type="Database Creation Exception",
                project_name=project_path.name,
                command=cmd,
                exception=e
            )
            return None
    
    def run_codeql_analysis(self, db_path: Path, language: str, project_name: str) -> Optional[Path]:
        """Run CodeQL analysis"""
        try:
            # Create results directory
            results_dir = Path(self.config['output']['results_dir']) / language
            results_dir.mkdir(parents=True, exist_ok=True)
            
            # Build analysis command
            cmd = [
                self.config['codeql']['cli_path'],
                'database', 'analyze',
                '--format', 'sarif-latest',
                '--output', str(results_dir / f'{project_name}.sarif'),
                '--threads', str(self.config['codeql']['threads']),
                '--ram', str(int(self.config['codeql']['memory'].replace('G', '')) * 1024),  # Convert GB to MB
                str(db_path)  # Database path
            ]
            
            # Add query suite (if specified)
            if self.config['codeql']['query_suite']:
                # Map language to appropriate query suite
                query_suite = self._get_language_query_suite(language, self.config['codeql']['query_suite'])
                if query_suite:
                    cmd.append(query_suite)
            
            self.logger.info(f"Starting analysis: {project_name} ({language})")
            
            # Execute command
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=self.config['codeql']['timeout']
            )
            
            if result.returncode == 0:
                sarif_file = results_dir / f'{project_name}.sarif'
                if sarif_file.exists():
                    self.logger.info(f"Analysis completed: {sarif_file}")
                    return sarif_file
                else:
                    self.logger.warning(f"Analysis completed but SARIF file not generated: {project_name}")
                    return None
            else:
                self.logger.error(f"Analysis failed: {project_name}")
                self.logger.error(f"Return code: {result.returncode}")
                self.logger.error(f"Command: {' '.join(cmd)}")
                self.logger.error(f"Error output: {result.stderr}")
                if result.stdout:
                    self.logger.error(f"Standard output: {result.stdout}")
                
                # Log detailed error to file
                self.log_detailed_error(
                    error_type="Analysis Failed",
                    project_name=project_name,
                    command=cmd,
                    return_code=result.returncode,
                    stderr=result.stderr,
                    stdout=result.stdout
                )
                return None
                
        except subprocess.TimeoutExpired as e:
            self.logger.error(f"Analysis timeout: {project_name}")
            self.logger.error(f"Timeout after {self.config['codeql']['timeout']} seconds")
            self.logger.error(f"Command: {' '.join(cmd)}")
            
            # Log detailed error to file
            self.log_detailed_error(
                error_type="Analysis Timeout",
                project_name=project_name,
                command=cmd,
                exception=e
            )
            return None
        except Exception as e:
            self.logger.error(f"Analysis exception: {project_name}")
            self.logger.error(f"Exception type: {type(e).__name__}")
            self.logger.error(f"Exception message: {str(e)}")
            self.logger.error(f"Command: {' '.join(cmd)}")
            
            # Log detailed error to file
            self.log_detailed_error(
                error_type="Analysis Exception",
                project_name=project_name,
                command=cmd,
                exception=e
            )
            return None
    
    def scan_project(self, project_path: Path, language: str) -> Dict:
        """Scan single project"""
        project_name = project_path.name
        result = {
            'project_name': project_name,
            'language': language,
            'project_path': str(project_path),
            'status': 'failed',
            'database_path': None,
            'sarif_path': None,
            'error': None,
            'start_time': datetime.now().isoformat(),
            'end_time': None
        }
        
        try:
            # Check if SARIF file already exists
            if self._sarif_file_exists(project_name, language):
                result['status'] = 'skipped'
                result['error'] = 'SARIF file already exists'
                result['end_time'] = datetime.now().isoformat()
                return result
            # Creating database (or using existing one)
            db_path = self.create_codeql_database(project_path, language)
            if db_path is None:
                result['error'] = 'Database creation failed'
                return result
            
            # Check if we're using an existing database
            result['database_status'] = 'existing' if self._is_valid_codeql_database(db_path) else 'created'
            
            result['database_path'] = str(db_path)
            
            # Run analysis
            sarif_path = self.run_codeql_analysis(db_path, language, project_name)
            if sarif_path is None:
                result['error'] = 'Analysis failed'
                return result
            
            result['sarif_path'] = str(sarif_path)
            result['status'] = 'success'
            
            if self.config['pipeline']['cleanup_databases']:
                shutil.rmtree(db_path, ignore_errors=True)
                result['database_path'] = None
            
        except Exception as e:
            result['error'] = str(e)
            self.logger.error(f"Scan project exception: {project_name}, error: {e}")
        
        finally:
            result['end_time'] = datetime.now().isoformat()
        
        return result
    
    def scan_language_batch(self, language: str, projects: List[Path]) -> List[Dict]:
        """Scan all projects of one language"""
        self.logger.info(f"Starting scan {language} language {len(projects)} projects")
        
        results = []
        max_batch_size = self.config['input']['max_projects_per_batch']
        
        # Process in batches
        for i in range(0, len(projects), max_batch_size):
            batch = projects[i:i + max_batch_size]
            self.logger.info(f"Processing {language} language batch {i//max_batch_size + 1} containing {len(batch)} projects")
            
            # Parallel scanning
            with ThreadPoolExecutor(max_workers=self.config['pipeline']['parallel_workers']) as executor:
                future_to_project = {
                    executor.submit(self.scan_project, project, language): project 
                    for project in batch
                }
                
                for future in as_completed(future_to_project):
                    project = future_to_project[future]
                    try:
                        result = future.result()
                        results.append(result)
                        
                        if result['status'] == 'success':
                            self.stats['scanned_projects'] += 1
                            self.logger.info(f"✓ Scan successful: {result['project_name']}")
                        elif result['status'] == 'skipped':
                            self.stats['skipped_projects'] += 1
                            self.logger.info(f"⏭ Scan skipped: {result['project_name']} - {result.get('error', 'SARIF file already exists')}")
                        else:
                            self.stats['failed_projects'] += 1
                            self.logger.error(f"✗ Scan failed: {result['project_name']} - {result['error']}")
                            
                    except Exception as e:
                        self.stats['failed_projects'] += 1
                        self.logger.error(f"✗ Scan exception: {project.name} - {e}")
                        results.append({
                            'project_name': project.name,
                            'language': language,
                            'project_path': str(project),
                            'status': 'failed',
                            'error': str(e),
                            'start_time': datetime.now().isoformat(),
                            'end_time': datetime.now().isoformat()
                        })
        
        # Update language statistics
        self.stats['languages'][language] = {
            'total': len(projects),
            'success': len([r for r in results if r['status'] == 'success']),
            'failed': len([r for r in results if r['status'] == 'failed']),
            'skipped': len([r for r in results if r['status'] == 'skipped'])
        }
        
        self.logger.info(f"{language} language scan completed: success {self.stats['languages'][language]['success']}, failed {self.stats['languages'][language]['failed']}, skipped {self.stats['languages'][language]['skipped']}")
        return results
    
    def generate_reports(self, all_results: List[Dict]):
        """Write JSON / HTML / plain-text summary reports for the whole run.

        JSON and HTML output are each gated by the config flags; the text
        summary is always produced.
        """
        self.logger.info("Starting report generation")
        
        reports_dir = Path(self.config['output']['reports_dir'])
        reports_dir.mkdir(parents=True, exist_ok=True)
        
        stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        
        # Machine-readable report: full config, stats and per-project results.
        if self.config['output']['generate_json']:
            payload = {
                'pipeline_info': {
                    'timestamp': stamp,
                    'config': self.config,
                    'stats': self.stats,
                },
                'results': all_results,
            }
            json_file = reports_dir / f'codeql_scan_report_{stamp}.json'
            with open(json_file, 'w', encoding='utf-8') as f:
                json.dump(payload, f, indent=2, ensure_ascii=False)
            self.logger.info(f"JSON report generated: {json_file}")
        
        # Human-readable HTML report.
        if self.config['output']['generate_html']:
            html_file = reports_dir / f'codeql_scan_report_{stamp}.html'
            self._generate_html_report(all_results, html_file, stamp)
            self.logger.info(f"HTML report generated: {html_file}")
        
        # The text summary is unconditional.
        summary_file = reports_dir / f'codeql_scan_summary_{stamp}.txt'
        self._generate_summary_report(all_results, summary_file, stamp)
        self.logger.info(f"Summary report generated: {summary_file}")
    
    def _generate_html_report(self, results: List[Dict], output_file: Path, timestamp: str):
        """Render the per-run HTML report to *output_file*.

        Builds a single HTML document (header with run-wide stats, then one
        table per language) by string concatenation and writes it out as
        UTF-8.  The visible report text is Chinese by design.

        Args:
            results: Per-project result dicts from scan_project().
            output_file: Destination .html path.
            timestamp: Run timestamp string embedded in the title/header.
        """
        # Document head + run-wide stat cards.  Double braces ({{ }}) are
        # f-string escapes for literal CSS braces.
        html_content = f"""
<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>CodeQL扫描报告 - {timestamp}</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 20px; }}
        .header {{ background-color: #f0f0f0; padding: 20px; border-radius: 5px; }}
        .stats {{ display: flex; gap: 20px; margin: 20px 0; }}
        .stat-card {{ background-color: #e8f4fd; padding: 15px; border-radius: 5px; flex: 1; }}
        .results-table {{ width: 100%; border-collapse: collapse; margin-top: 20px; }}
        .results-table th, .results-table td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
        .results-table th {{ background-color: #f2f2f2; }}
        .status-success {{ color: green; font-weight: bold; }}
        .status-failed {{ color: red; font-weight: bold; }}
        .language-section {{ margin: 30px 0; }}
        .language-header {{ background-color: #d4edda; padding: 10px; border-radius: 5px; }}
    </style>
</head>
<body>
    <div class="header">
        <h1>CodeQL安全扫描报告</h1>
        <p>生成时间: {timestamp}</p>
        <p>扫描配置: {self.config['codeql']['query_suite']}</p>
    </div>
    
    <div class="stats">
        <div class="stat-card">
            <h3>总项目数</h3>
            <p>{self.stats['total_projects']}</p>
        </div>
        <div class="stat-card">
            <h3>Scan successful</h3>
            <p class="status-success">{self.stats['scanned_projects']}</p>
        </div>
        <div class="stat-card">
            <h3>Scan failed</h3>
            <p class="status-failed">{self.stats['failed_projects']}</p>
        </div>
        <div class="stat-card">
            <h3>涉及Language</h3>
            <p>{len(self.stats['languages'])}</p>
        </div>
    </div>
    
    <h2>按Language分类结果</h2>
"""
        
        # Group results by language
        results_by_language = {}
        for result in results:
            lang = result['language']
            if lang not in results_by_language:
                results_by_language[lang] = []
            results_by_language[lang].append(result)
        
        # One section (header + table) per language.
        # NOTE(review): 'skipped' results are not counted in either
        # success_count or failed_count here, and below they render with the
        # status-failed CSS class — confirm whether skipped rows should get
        # their own count/style.
        for language, lang_results in results_by_language.items():
            success_count = len([r for r in lang_results if r['status'] == 'success'])
            failed_count = len([r for r in lang_results if r['status'] == 'failed'])
            
            html_content += f"""
    <div class="language-section">
        <div class="language-header">
            <h3>{language} Language</h3>
            <p>总计: {len(lang_results)} | success: {success_count} | failed: {failed_count}</p>
        </div>
        
        <table class="results-table">
            <thead>
                <tr>
                    <th>项目名称</th>
                    <th>状态</th>
                    <th>SARIF文件</th>
                    <th>error信息</th>
                    <th>扫描时间</th>
                </tr>
            </thead>
            <tbody>
"""
            
            # One table row per project result.
            # NOTE(review): result["sarif_path"] is a direct key access —
            # failure records synthesized in scan_language_batch's exception
            # branch lack this key and would raise KeyError here; verify.
            for result in lang_results:
                status_class = 'status-success' if result['status'] == 'success' else 'status-failed'
                sarif_link = f'<a href="{result["sarif_path"]}" target="_blank">查看</a>' if result['sarif_path'] else '无'
                error_info = result.get('error', '') if result['status'] == 'failed' else ''
                
                html_content += f"""
                <tr>
                    <td>{result['project_name']}</td>
                    <td class="{status_class}">{result['status']}</td>
                    <td>{sarif_link}</td>
                    <td>{error_info}</td>
                    <td>{result['start_time']}</td>
                </tr>
"""
            
            html_content += """
            </tbody>
        </table>
    </div>
"""
        
        # Close the document and write it out in one shot.
        html_content += """
</body>
</html>
"""
        
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(html_content)
    
    def _generate_summary_report(self, results: List[Dict], output_file: Path, timestamp: str):
        """Generate summary report"""
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(f"CodeQL安全扫描汇总报告\n")
            f.write(f"生成时间: {timestamp}\n")
            f.write(f"{'='*50}\n\n")
            
            f.write(f"扫描统计:\n")
            f.write(f"  总项目数: {self.stats['total_projects']}\n")
            f.write(f"  Scan successful: {self.stats['scanned_projects']}\n")
            f.write(f"  Scan failed: {self.stats['failed_projects']}\n")
            f.write(f"  涉及Language: {len(self.stats['languages'])}\n\n")
            
            f.write(f"Statistics by language:\n")
            for language, stats in self.stats['languages'].items():
                f.write(f"  {language}: Total {stats['total']}, Success {stats['success']}, Failed {stats['failed']}, Skipped {stats['skipped']}\n")
            
            f.write(f"\nFailed projects details:\n")
            failed_results = [r for r in results if r['status'] == 'failed']
            for result in failed_results:
                f.write(f"  {result['project_name']} ({result['language']}): {result.get('error', 'Unknown error')}\n")
            
            f.write(f"\nSkipped projects details:\n")
            skipped_results = [r for r in results if r['status'] == 'skipped']
            for result in skipped_results:
                f.write(f"  {result['project_name']} ({result['language']}): {result.get('error', 'SARIF file already exists')}\n")
    
    def run_pipeline(self, languages: List[str] = None, max_projects_per_language: int = None):
        """Run complete scanning pipeline
        
        Args:
            languages: List of specific languages to scan (if None, scan all)
            max_projects_per_language: Maximum number of projects per language (if None, use config default)
        """
        self.stats['start_time'] = datetime.now().isoformat()
        self.logger.info("Starting CodeQL scanning pipeline")
        
        try:
            # Discover projects with filtering
            projects_by_language = self.discover_projects(
                target_languages=languages,
                max_projects_per_language=max_projects_per_language
            )
            
            if not projects_by_language:
                self.logger.warning("No projects found")
                return
            
            all_results = []
            
            # Scan by language
            for language, projects in projects_by_language.items():
                try:
                    results = self.scan_language_batch(language, projects)
                    all_results.extend(results)
                except Exception as e:
                    self.logger.error(f"扫描Language {language} exception occurred: {e}")
                    continue
            
            # Generate reports
            self.generate_reports(all_results)
            
        except Exception as e:
            self.logger.error(f"Pipeline execution exception: {e}")
            raise
        
        finally:
            self.stats['end_time'] = datetime.now().isoformat()
            self.logger.info("CodeQL scanning pipeline completed")
            self.logger.info(f"Total time: {self.stats['start_time']} - {self.stats['end_time']}")


def main():
    """CLI entry point: parse arguments, configure and run the pipeline."""
    parser = argparse.ArgumentParser(description='CodeQL automated scanning pipeline')
    parser.add_argument('--config', help='Configuration file path')
    parser.add_argument('--languages', nargs='+', help='Specify languages to scan (e.g., Python TypeScript JavaScript)')
    parser.add_argument('--max-projects', type=int, help='Maximum number of projects to scan per language')
    parser.add_argument('--parallel-workers', type=int, help='Number of parallel worker threads')
    parser.add_argument('--timeout', type=int, help='Timeout per project scan (seconds)')
    parser.add_argument('--dry-run', action='store_true', help='Preview mode, do not actually execute scan')
    
    args = parser.parse_args()
    
    # Create pipeline
    pipeline = CodeQLPipeline(args.config)
    
    # Override configuration from CLI flags. Compare against None so that an
    # explicitly passed 0 is not silently ignored by a truthiness check.
    if args.max_projects is not None:
        pipeline.config['input']['max_projects_per_language'] = args.max_projects
    if args.parallel_workers is not None:
        pipeline.config['pipeline']['parallel_workers'] = args.parallel_workers
    if args.timeout is not None:
        pipeline.config['codeql']['timeout'] = args.timeout
    
    if args.dry_run:
        # Preview mode: list discovered projects without running any scan
        projects_by_language = pipeline.discover_projects(
            target_languages=args.languages,
            max_projects_per_language=args.max_projects
        )
        print("Preview mode - Found projects:")
        for language, projects in projects_by_language.items():
            print(f"  {language}: {len(projects)} projects")
            for project in projects[:5]:  # Only show first 5
                print(f"    - {project.name}")
            if len(projects) > 5:
                print(f"    ... more {len(projects) - 5} projects")
    else:
        # Execute scan
        pipeline.run_pipeline(
            languages=args.languages,
            max_projects_per_language=args.max_projects
        )


# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()
