"""
Simple test runner for GCR Solver Manager.

This module handles local test execution and basic SLURM job submission.
"""

import json
import logging
import os
import re
import shlex
import subprocess
import sys
import threading
import time
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple

from ..core.simple_config import get_config
from ..logs.manager import LogManager

logger = logging.getLogger(__name__)


def _get_local_now() -> datetime:
    """Get current time in local timezone."""
    return datetime.now()


class SimpleTestRunner:
    """Simple test runner for GCR solvers."""
    
    def __init__(self, log_manager: Optional[LogManager] = None, source_dir: Optional[str] = None):
        """Initialize test runner.

        Args:
            log_manager: Log manager used to persist test records; a new
                LogManager is created when omitted.
            source_dir: Root directory of the solver source tree. Several
                methods (SLURM script generation, binary lookup, timing
                records, output retrieval) read ``self.source_dir``, which
                previously was never initialized and raised AttributeError.
                When omitted, falls back to the ``source_dir`` config key and
                then to the current working directory.
        """
        self.config = get_config()
        self.log_manager = log_manager or LogManager()
        # Fix: self.source_dir is referenced throughout this class but was
        # never set anywhere.  NOTE(review): the 'source_dir' config key is
        # assumed here -- confirm against simple_config's schema.
        self.source_dir = Path(source_dir or self.config.get('source_dir', os.getcwd()))
    
    def run(self, solver: str, gpu: Optional[str] = None, params: Optional[Dict[str, Any]] = None, 
            tags: Optional[List[str]] = None) -> Tuple[bool, int]:
        """Run solver using existing binary (no build required)."""
        params = params or {}
        tags = tags or []
        
        logger.info(f"Running solver: {solver}, gpu={gpu or 'cpu'}")
        
        # Direct test execution without build
        return self.run_test(solver, gpu, params, tags)
    
    def run_test(self, solver: str, gpu: Optional[str] = None, params: Optional[Dict[str, Any]] = None, 
                tags: Optional[List[str]] = None) -> Tuple[bool, int]:
        """
        Run a test using binary execution only (requires pre-built binary).
        
        Args:
            solver: Solver type ('gcr', 'gmres', 'ca-gcr', 'bca-gmres')
            gpu: GPU type ('cuda', 'hip', None for CPU)
            params: Test parameters including binary_path
            tags: Tags for this test
            
        Returns:
            Tuple of (success, run_id)
        """
        params = params or {}
        tags = tags or []
        
        logger.info(f"Running test: solver={solver}, gpu={gpu}")
        
        # Check if binary path is provided
        binary_path = params.get('binary_path')
        if not binary_path:
            return False, 0
        
        # Execute test using provided binary
        success, test_output = self._execute_binary_test(binary_path, solver, gpu, params)
        
        # Create test configuration record
        config = {
            'solver': solver,
            'gpu': gpu,
            'params': params,
            'binary_execution': True,
            'timestamp': _get_local_now().isoformat()
        }
        
        # Store test results
        try:
            log_content = f"Binary Test Execution: solver={solver}, gpu={gpu}\n\nTest Output:\n{test_output}"
            run_id = self.log_manager.store_test_log(solver, gpu, config, log_content, tags + ["binary-execution"])
            
            if success:
                logger.info(f"Test completed successfully (run_id: {run_id})")
            else:
                logger.error(f"Test failed (run_id: {run_id})")
            
            return success, run_id
            
        except Exception as e:
            log_content = f"Test failed: {str(e)}"
            run_id = self.log_manager.store_test_log(solver, gpu, config, log_content, tags)
            return False, run_id
    
    def _execute_binary_test(self, binary_path: str, solver: str, gpu: Optional[str], params: Dict[str, Any]) -> Tuple[bool, str]:
        """Execute a binary test with the provided binary path."""
        try:
            binary_path = Path(binary_path)
            if not binary_path.exists():
                return False, f"Binary not found: {binary_path}"
            
            # Get MPI process count
            nproc = params.get('nproc', 1)
            
            # Build basic mpirun command
            cmd = ["mpirun", "-np", str(nproc), str(binary_path)]
            
            # Add any additional arguments if specified
            args = params.get('arguments', '')
            if args:
                cmd.extend(args.split())
            
            logger.info(f"Executing: {' '.join(cmd)}")
            
            # Execute the binary
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=1800)
            
            success = result.returncode == 0
            output = f"Command: {' '.join(cmd)}\nReturn code: {result.returncode}\n\nSTDOUT:\n{result.stdout}\n\nSTDERR:\n{result.stderr}"
            
            return success, output
            
        except subprocess.TimeoutExpired:
            return False, "Binary execution timed out after 30 minutes"
        except Exception as e:
            return False, f"Binary execution failed: {e}"
    
    def submit_slurm_job(self, solver: str, gpu: Optional[str] = None, params: Optional[Dict[str, Any]] = None, 
                        slurm_params: Optional[Dict[str, Any]] = None) -> Tuple[str, int]:
        """
        Submit test as SLURM job.
        
        Args:
            solver: Solver type
            gpu: GPU type
            params: Test parameters
            slurm_params: SLURM job parameters
            
        Returns:
            Tuple of (job_id, run_id)
        """
        slurm_params = slurm_params or {}
        params = params or {}
        
        # Create SLURM script
        job_script = self._create_slurm_script(solver, gpu, params, slurm_params)
        
        # Submit job
        try:
            result = subprocess.run(['sbatch'], input=job_script, capture_output=True, text=True)
            if result.returncode != 0:
                raise RuntimeError(f"sbatch failed: {result.stderr}")
            
            job_id = result.stdout.strip().split()[-1]
            
            # Create test record with SLURM job ID
            config = {
                'solver': solver,
                'gpu': gpu,
                'params': params,
                'slurm_params': slurm_params,
                'job_id': job_id,
                'timestamp': _get_local_now().isoformat()
            }
            
            # Store initial record (log will be updated when job completes)
            run_id = self.log_manager.store_test_log(solver, gpu, config, f"SLURM job submitted: {job_id}")
            
            return job_id, run_id
            
        except Exception as e:
            raise RuntimeError(f"Failed to submit SLURM job: {e}")
    
    def _create_slurm_script(self, solver: str, gpu: Optional[str], params: Dict[str, Any], 
                           slurm_params: Dict[str, Any]) -> str:
        """Create SLURM submission script."""
        partition = slurm_params.get('partition', self.config.get('test.slurm_partition', 'gpu'))
        nodes = slurm_params.get('nodes', 1)
        ntasks_per_node = slurm_params.get('ntasks_per_node', params.get('nproc', 4))
        time_limit = slurm_params.get('time', '01:00:00')
        account = slurm_params.get('account', self.config.get('test.slurm_account'))
        
        # Determine test script
        if gpu == 'cuda':
            test_script = "test_cuda_solvers.sh"
        elif gpu == 'hip':
            test_script = "test_hip_solvers.sh"
        else:
            test_script = "test_cuda_solvers.sh"
        
        # Build command
        cmd = [f"./{test_script}", f"--solver={solver}"]
        
        for key, value in params.items():
            if key == 'nproc':
                cmd.append(f"--nproc={value}")
            elif key == 'resolution':
                cmd.append(f"--resolution={value}")
            elif key == 'maxit':
                cmd.append(f"--maxit={value}")
            elif key == 'debug':
                cmd.append("--debug")
        
        # Create script content
        script_lines = [
            "#!/bin/bash",
            f"#SBATCH --partition={partition}",
            f"#SBATCH --nodes={nodes}",
            f"#SBATCH --ntasks-per-node={ntasks_per_node}",
            f"#SBATCH --time={time_limit}",
            f"#SBATCH --job-name=gsm-{solver}-{gpu or 'cpu'}",
        ]
        
        if account:
            script_lines.append(f"#SBATCH --account={account}")
        
        if gpu == 'cuda':
            script_lines.append("#SBATCH --gres=gpu:1")
        elif gpu == 'hip':
            script_lines.append("#SBATCH --gres=gpu:1")
        
        script_lines.extend([
            "",
            "# Load modules",
            "module load gcc",
            "module load openmpi",
            "",
            "# Run test",
            f"cd {self.source_dir}",
            " ".join(cmd),
            "",
            "# Exit with test result",
            "exit $?"
        ])
        
        return "\n".join(script_lines)
    
    def run_remote_test(self, solver: str, gpu: Optional[str] = None, params: Optional[Dict[str, Any]] = None,
                       remote_config: Optional[Dict[str, Any]] = None, tags: Optional[List[str]] = None, 
                       watch: bool = False) -> Tuple[bool, int]:
        """
        Build and run test on remote system via SSH.
        
        This method performs the complete workflow:
        1. Builds the solver on the remote system using scripts/build.sh
        2. Runs the test using the appropriate test script
        
        Args:
            solver: Solver type (gcr, gmres, bca-gmres, ca-gcr)
            gpu: GPU type (cuda, hip)
            params: Test parameters (nproc, resolution, debug, maxit)
            remote_config: Remote connection configuration
            tags: Tags for this test
            
        Returns:
            Tuple of (success, run_id)
        """
        remote_config = remote_config or {}
        params = params or {}
        tags = tags or []
        
        hostname = remote_config.get('hostname', self.config.get('remote.hostname'))
        workdir = remote_config.get('workdir', self.config.get('remote.workdir'))
        
        if not hostname:
            raise ValueError("Remote hostname is required")
        
        # Build SSH command - uses SSH config for all connection details
        ssh_cmd = ['ssh', hostname]
        
        # Build remote command
        remote_cmd = []
        if workdir:
            remote_cmd.append(f"cd {workdir}")
        
        # Use pure Python approach - call gsm on remote side
        class RemoteConfig:
            def __init__(self, hostname, workdir):
                self.hostname = hostname
                self.workdir = workdir
        
        remote = RemoteConfig(hostname, workdir)
        python_cmd = self._create_remote_python_command(solver, gpu, params, remote)
        remote_cmd.append(python_cmd)
        
        ssh_cmd.append("; ".join(remote_cmd))
        
        # Create test configuration record
        config = {
            'solver': solver,
            'gpu': gpu,
            'params': params,
            'remote_config': remote_config,
            'timestamp': _get_local_now().isoformat()
        }
        
        # Execute remote test
        try:
            if watch:
                success, run_id = self._execute_remote_with_watch(ssh_cmd, solver, gpu, config, tags)
                return success, run_id
            else:
                result = subprocess.run(ssh_cmd, capture_output=True, text=True, timeout=3600)
                success = result.returncode == 0
                log_content = f"Remote command: {' '.join(ssh_cmd)}\n\nstdout:\n{result.stdout}\n\nstderr:\n{result.stderr}"
                
                run_id = self.log_manager.store_test_log(solver, gpu, config, log_content, tags)
                return success, run_id
                
        except Exception as e:
            log_content = f"Remote test failed: {str(e)}"
            run_id = self.log_manager.store_test_log(solver, gpu, config, log_content, tags)
            return False, run_id
    
    def _execute_remote_with_watch(self, ssh_cmd: List[str], solver: str, gpu: str, 
                                  config: Dict[str, Any], tags: List[str]) -> Tuple[bool, int]:
        """
        Execute remote command with real-time output and SLURM progress tracking.

        Streams the SSH process's combined stdout/stderr line by line:
        build lines are echoed with a build marker, a SLURM submission line
        starts a background progress tracker, and other notable lines are
        echoed as info.  The full transcript is stored through the log
        manager once the process exits.

        Args:
            ssh_cmd: Complete ssh command list (including the remote shell command).
            solver: Solver type, recorded with the log.
            gpu: GPU type, recorded with the log.
            config: Test configuration dict; config['params'] is forwarded to
                the timing recorder.
            tags: Tags stored with the log record.

        Returns:
            Tuple of (success, run_id).
        """
        print(f"🔄 Executing: {' '.join(ssh_cmd)}")
        print("📡 Starting remote build and test execution...")
        
        # Capture all output for logging
        all_output = []
        
        # Start SSH process with real-time output; stderr is merged into
        # stdout so a single reader sees everything in order.
        process = subprocess.Popen(
            ssh_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            bufsize=1,  # line-buffered so lines arrive as they are produced
            universal_newlines=True
        )
        
        # Variables for SLURM job tracking
        slurm_job_id = None
        slurm_start_time = None
        progress_thread = None
        
        try:
            # Read output line by line until the remote process closes stdout
            for line in iter(process.stdout.readline, ''):
                line = line.rstrip()
                all_output.append(line)
                
                # Print build output directly
                if self._is_build_output(line):
                    print(f"🔨 {line}")
                # Detect SLURM job submission
                elif "Submitted batch job" in line:
                    slurm_job_id = self._extract_slurm_job_id(line)
                    if slurm_job_id:
                        print(f"📋 SLURM job submitted: {slurm_job_id}")
                        slurm_start_time = _get_local_now()
                        # Start progress bar in separate thread
                        progress_thread = self._start_slurm_progress_tracker(slurm_job_id, solver, gpu)
                # Print other important output
                elif self._is_important_output(line):
                    print(f"ℹ️  {line}")
            
            # Wait for process completion
            process.wait()
            
            # Stop progress tracking
            if progress_thread:
                progress_thread.stop()
                progress_thread.join()
            
            # Record timing data for future predictions (only when a SLURM
            # job was actually observed in the output stream)
            if slurm_job_id and slurm_start_time:
                total_time = (_get_local_now() - slurm_start_time).total_seconds()
                self._record_timing_data(solver, gpu, config['params'], total_time)
            
            success = process.returncode == 0
            log_content = f"Remote command: {' '.join(ssh_cmd)}\n\n" + "\n".join(all_output)
            
            run_id = self.log_manager.store_test_log(solver, gpu, config, log_content, tags)
            
            if success:
                print("✅ Remote test completed successfully!")
            else:
                print("❌ Remote test failed!")
            
            return success, run_id
            
        except Exception as e:
            # Best-effort cleanup: stop the tracker (no join here) and still
            # store a record of the failure.
            if progress_thread:
                progress_thread.stop()
            log_content = f"Remote test failed: {str(e)}"
            run_id = self.log_manager.store_test_log(solver, gpu, config, log_content, tags)
            return False, run_id
    
    def _is_build_output(self, line: str) -> bool:
        """Check if line contains build-related output."""
        build_indicators = [
            "-- The C compiler identification",
            "-- Check for working C compiler",
            "-- Looking for",
            "-- Found",
            "-- Configuring done",
            "-- Generating done",
            "-- Build files have been written",
            "Scanning dependencies",
            "Building C object",
            "Building CXX object",
            "Building CUDA object",
            "Linking",
            "[100%]",
            "Built target",
            "cmake",
            "make",
            "nvcc",
            "hipcc"
        ]
        return any(indicator in line for indicator in build_indicators)
    
    def _is_important_output(self, line: str) -> bool:
        """Check if line contains important non-build output."""
        important_indicators = [
            "Error:",
            "error:",
            "Warning:",
            "warning:",
            "FAILED",
            "SUCCESS",
            "Convergence",
            "Iteration",
            "squeue",
            "srun"
        ]
        return any(indicator in line for indicator in important_indicators)
    
    def _execute_solver_test(self, solver: str, gpu: Optional[str], params: Dict[str, Any]) -> Tuple[bool, str]:
        """Execute a solver test using the universal binary."""
        try:
            # Determine binary name based on GPU type and debug mode
            is_debug = params.get('debug', False)
            binary_name = self._get_binary_name(gpu, is_debug)
            binary_path = self.source_dir / "build" / binary_name
            
            if not binary_path.exists():
                return False, f"Binary not found: {binary_path}"
            
            # Build mpirun command for the solver
            cmd = self._build_mpirun_command(binary_path, solver, params)
            
            logger.info(f"Executing: {' '.join(cmd)}")
            
            # Execute the solver
            result = subprocess.run(cmd, cwd=self.source_dir, capture_output=True, text=True, timeout=1800)
            
            success = result.returncode == 0
            output = f"Command: {' '.join(cmd)}\nReturn code: {result.returncode}\n\nSTDOUT:\n{result.stdout}\n\nSTDERR:\n{result.stderr}"
            
            return success, output
            
        except subprocess.TimeoutExpired:
            return False, "Test execution timed out after 30 minutes"
        except Exception as e:
            return False, f"Test execution failed: {e}"
    
    def _get_binary_name(self, gpu: Optional[str], debug: bool = False) -> str:
        """Get the expected binary name based on GPU type and debug mode to match build system output."""
        name = "main"
        
        if gpu == 'cuda':
            name += "-cuda"
            # Add NCCL and CA suffixes (always enabled in universal build)
            name += "-nccl-ca"
        elif gpu == 'hip':
            name += "-hip"
            # Add RCCL and CA suffixes (always enabled in universal build)
            name += "-rccl-ca"
        
        if debug:
            name += "-debug"
        
        return f"{name}.exe"
    
    def _build_mpirun_command(self, binary_path: Path, solver: str, params: Dict[str, Any]) -> List[str]:
        """Build the mpirun command for executing the solver."""
        nproc = params.get('nproc', 4)
        cmd = [
            "mpirun",
            f"-np", str(nproc),
            str(binary_path),
            f"--solver={solver}",
            f"--resolution={params.get('resolution', 1.0)}"
        ]
        
        # Add nprocx parameter for domain decomposition (default nprocx=2 to match shell script)
        nprocx = params.get('nprocx', 2)
        cmd.append(f"--nprocx={nprocx}")
        
        # Add maximum iterations
        if params.get('debug'):
            cmd.append("--maxit=1")  # Debug mode uses 1 iteration
        else:
            cmd.append(f"--maxit={params.get('maxit', 20)}")
        
        # Add other solver-specific flags based on solver type
        if solver == 'ca-gcr':
            cmd.append("--use-ca")
        elif solver == 'gmres':
            cmd.append("--use-gmres")
        elif solver == 'bca-gmres':
            cmd.append("--use-gmres")
            cmd.append("--use-ca")
        
        return cmd
    
    def _create_remote_python_command(self, solver: str, gpu: Optional[str], params: Dict[str, Any], remote) -> str:
        """Create a remote command that installs updated gsm and uses pure Python approach."""
        # Install/update gsm on remote server first, then run with Python build engine
        
        install_cmd = "cd gcr-solver-manager && pip install -e . --break-system-packages --quiet"
        
        run_cmd_parts = ["gsm", "run", f"--solver={solver}"]
        
        if gpu:
            run_cmd_parts.append(f"--gpu={gpu}")
        
        if params.get('nproc'):
            run_cmd_parts.append(f"--nproc={params['nproc']}")
        
        if params.get('resolution'):
            run_cmd_parts.append(f"--resolution={params['resolution']}")
        
        if params.get('debug'):
            run_cmd_parts.append("--debug")
        
        if params.get('maxit'):
            run_cmd_parts.append(f"--maxit={params['maxit']}")
        
        run_cmd = ' '.join(run_cmd_parts)
        
        # Combine install and run commands
        return f"{install_cmd} && {run_cmd}"
    
    def _extract_slurm_job_id(self, line: str) -> Optional[str]:
        """Extract SLURM job ID from submission output."""
        match = re.search(r"Submitted batch job (\d+)", line)
        return match.group(1) if match else None
    
    def _start_slurm_progress_tracker(self, job_id: str, solver: str, gpu: str) -> 'ProgressTracker':
        """Start SLURM job progress tracking in separate thread.

        Creates a ProgressTracker (defined elsewhere in this module) seeded
        with the historically predicted runtime for this solver/GPU pair,
        starts it, and returns it so the caller can stop()/join() it when
        the job finishes.
        """
        tracker = ProgressTracker(job_id, solver, gpu, self._get_predicted_time(solver, gpu))
        tracker.start()
        return tracker
    
    def _get_predicted_time(self, solver: str, gpu: str) -> int:
        """Get predicted execution time based on historical data."""
        # Default times based on solver and GPU type (in seconds)
        defaults = {
            ('gcr', 'cuda'): 300,      # 5 minutes
            ('gcr', 'hip'): 420,       # 7 minutes  
            ('gmres', 'cuda'): 360,    # 6 minutes
            ('gmres', 'hip'): 480,     # 8 minutes
            ('bca-gmres', 'cuda'): 480, # 8 minutes
            ('bca-gmres', 'hip'): 600,  # 10 minutes
            ('ca-gcr', 'cuda'): 360,   # 6 minutes
            ('ca-gcr', 'hip'): 480     # 8 minutes
        }
        
        # Try to load historical timing data
        timing_file = Path(self.source_dir) / '.gsm_timings.json'
        if timing_file.exists():
            try:
                with open(timing_file, 'r') as f:
                    timing_data = json.load(f)
                    key = f"{solver}_{gpu}"
                    if key in timing_data and timing_data[key]['count'] > 0:
                        return int(timing_data[key]['avg_time'])
            except Exception:
                pass
        
        return defaults.get((solver, gpu), 420)  # Default 7 minutes
    
    def _record_timing_data(self, solver: str, gpu: str, params: Dict[str, Any], actual_time: float) -> None:
        """Record actual execution time for future predictions."""
        timing_file = Path(self.source_dir) / '.gsm_timings.json'
        
        # Load existing data
        timing_data = {}
        if timing_file.exists():
            try:
                with open(timing_file, 'r') as f:
                    timing_data = json.load(f)
            except Exception:
                pass
        
        # Update timing data
        key = f"{solver}_{gpu}"
        if key not in timing_data:
            timing_data[key] = {'total_time': 0, 'count': 0, 'avg_time': 0}
        
        timing_data[key]['total_time'] += actual_time
        timing_data[key]['count'] += 1
        timing_data[key]['avg_time'] = timing_data[key]['total_time'] / timing_data[key]['count']
        
        # Save updated data
        try:
            with open(timing_file, 'w') as f:
                json.dump(timing_data, f, indent=2)
        except Exception as e:
            logger.debug(f"Failed to save timing data: {e}")

    def get_recent_tests(self, n: int = 10, solver: Optional[str] = None, 
                        gpu: Optional[str] = None) -> List[Dict[str, Any]]:
        """Get recent test runs."""
        return self.log_manager.db.get_latest(n, 'test', solver, gpu)
    
    def check_slurm_availability(self) -> bool:
        """Check if SLURM is available."""
        try:
            subprocess.run(['sinfo'], capture_output=True, timeout=10)
            return True
        except (subprocess.SubprocessError, FileNotFoundError):
            return False
    
    def get_test_statistics(self) -> Dict[str, Any]:
        """Get test statistics."""
        stats = self.log_manager.db.get_statistics()
        
        test_stats = {
            'total_tests': stats['overview']['test_runs'],
            'successful_tests': 0,
            'failed_tests': 0,
            'solver_breakdown': {},
            'gpu_breakdown': {}
        }
        
        recent_tests = self.get_recent_tests(100)
        
        for test in recent_tests:
            if test['status'] == 'success':
                test_stats['successful_tests'] += 1
            elif test['status'] == 'failed':
                test_stats['failed_tests'] += 1
            
            # Solver breakdown
            solver = test['solver']
            if solver not in test_stats['solver_breakdown']:
                test_stats['solver_breakdown'][solver] = {'total': 0, 'success': 0, 'failed': 0}
            
            test_stats['solver_breakdown'][solver]['total'] += 1
            if test['status'] == 'success':
                test_stats['solver_breakdown'][solver]['success'] += 1
            elif test['status'] == 'failed':
                test_stats['solver_breakdown'][solver]['failed'] += 1
            
            # GPU breakdown
            gpu = test['gpu'] or 'cpu'
            if gpu not in test_stats['gpu_breakdown']:
                test_stats['gpu_breakdown'][gpu] = {'total': 0, 'success': 0, 'failed': 0}
            
            test_stats['gpu_breakdown'][gpu]['total'] += 1
            if test['status'] == 'success':
                test_stats['gpu_breakdown'][gpu]['success'] += 1
            elif test['status'] == 'failed':
                test_stats['gpu_breakdown'][gpu]['failed'] += 1
        
        return test_stats
    
    def check_slurm_job_status(self, job_id: str) -> Dict[str, Any]:
        """Check status of SLURM job."""
        try:
            # Check job status using squeue
            result = subprocess.run(['squeue', '-j', job_id, '-h', '-o', '%T %R'], 
                                  capture_output=True, text=True, timeout=10)
            
            if result.returncode == 0 and result.stdout.strip():
                parts = result.stdout.strip().split()
                status = parts[0] if parts else 'UNKNOWN'
                reason = parts[1] if len(parts) > 1 else ''
                return {
                    'status': status,
                    'reason': reason,
                    'found': True
                }
            else:
                # Job not found in queue, check if it completed
                # Try to get job info from sacct
                result = subprocess.run(['sacct', '-j', job_id, '-n', '-o', 'State,ExitCode'], 
                                      capture_output=True, text=True, timeout=10)
                
                if result.returncode == 0 and result.stdout.strip():
                    lines = [line.strip() for line in result.stdout.strip().split('\n') if line.strip()]
                    if lines:
                        # Get the last line (should be the main job)
                        parts = lines[-1].split()
                        state = parts[0] if parts else 'UNKNOWN'
                        exit_code = parts[1] if len(parts) > 1 else '0:0'
                        
                        return {
                            'status': state,
                            'exit_code': exit_code,
                            'found': True
                        }
                
                return {'status': 'NOT_FOUND', 'found': False}
                
        except Exception as e:
            logger.warning(f"Failed to check SLURM job status for {job_id}: {e}")
            return {'status': 'ERROR', 'error': str(e), 'found': False}
    
    def get_slurm_job_output(self, job_id: str, run_id: int) -> bool:
        """Retrieve SLURM job output and update database record."""
        try:
            # Look for job output files
            output_patterns = [
                f"slurm-{job_id}.out",
                f"gsm-*-{job_id}.out",
                f"job-{job_id}.out"
            ]
            
            output_file = None
            for pattern in output_patterns:
                matches = list(self.source_dir.glob(pattern))
                if matches:
                    output_file = matches[0]
                    break
            
            if not output_file or not output_file.exists():
                logger.warning(f"SLURM output file not found for job {job_id}")
                return False
            
            # Read output file
            log_content = output_file.read_text()
            
            # Update database record with actual log content
            db = self.log_manager.db
            run_data = db.get_by_id(run_id)
            if run_data:
                # Update the log file with actual output
                log_path = Path(run_data['log_path'])
                if log_path.exists():
                    # Append SLURM output to existing log
                    existing_content = log_path.read_text()
                    combined_content = existing_content + "\n\n=== SLURM JOB OUTPUT ===\n" + log_content
                    log_path.write_text(combined_content)
                else:
                    # Create new log file with SLURM output
                    log_path.parent.mkdir(parents=True, exist_ok=True)
                    log_path.write_text(f"=== SLURM JOB OUTPUT ===\n{log_content}")
                
                return True
            
        except Exception as e:
            logger.error(f"Failed to retrieve SLURM job output for {job_id}: {e}")
        
        return False
    
    def monitor_slurm_jobs(self) -> Dict[str, Any]:
        """Monitor all active SLURM jobs from database."""
        # Get recent runs that might have SLURM job IDs
        recent_runs = self.log_manager.db.get_latest(50, 'test')
        
        slurm_jobs = []
        for run in recent_runs:
            if run.get('config'):
                config = run['config']
                if isinstance(config, str):
                    import json
                    try:
                        config = json.loads(config)
                    except:
                        continue
                
                if 'job_id' in config:
                    job_id = config['job_id']
                    status_info = self.check_slurm_job_status(job_id)
                    
                    job_info = {
                        'run_id': run['id'],
                        'job_id': job_id,
                        'solver': run['solver'],
                        'gpu': run['gpu'],
                        'created_at': run['created_at'],
                        'status': status_info.get('status', 'UNKNOWN')
                    }
                    
                    # If job is completed, try to get output
                    if status_info.get('status') in ['COMPLETED', 'FAILED', 'CANCELLED']:
                        if run['status'] == 'running':
                            # Job completed but database not updated
                            self.get_slurm_job_output(job_id, run['id'])
                            job_info['output_retrieved'] = True
                    
                    slurm_jobs.append(job_info)
        
        return {
            'total_jobs': len(slurm_jobs),
            'jobs': slurm_jobs
        }
    
    def run_remote_hip_test(self, solver: str, params: Dict[str, Any], tags: Optional[List[str]] = None) -> Tuple[bool, int]:
        """
        Run HIP test on remote server using remote_test_hip_solvers.sh.

        Builds the wrapper-script command line from ``params``, always submits
        through SLURM, and records the attempt (including any detected SLURM
        job ID) in the test log database.

        Args:
            solver: Solver type ('gcr', 'ca-gcr', 'gmres', 'bca-gmres')
            params: Test parameters (nproc, resolution, maxit, debug, both)
            tags: Tags for this test

        Returns:
            Tuple of (success, run_id)
        """
        tags = tags or []

        # Get remote configuration
        config = get_config()
        remote_workdir = config.get('remote.workdir', '/public/home/suyuexinghen/test/gcr-nccl')

        # Build remote_test_hip_solvers.sh command
        cmd = [str(self.source_dir / "remote_test_hip_solvers.sh")]
        cmd.append(f"--solver={solver}")

        # Translate parameters into script flags; defaults (nproc=4,
        # resolution=1.0) are omitted so the script's own defaults apply.
        for key, value in params.items():
            if key == 'nproc' and value != 4:
                cmd.append(f"--nproc={value}")
            elif key == 'resolution' and value != 1.0:
                cmd.append(f"--resolution={value}")
            elif key == 'maxit':
                cmd.append(f"--maxit={value}")
            elif key == 'debug' and value:
                cmd.append("--debug")
            elif key == 'both' and value:
                cmd.append("--both")

        # Always use SLURM for remote HIP tests
        cmd.append("--slurm")

        # Create test configuration record
        test_config = {
            'solver': solver,
            'gpu': 'hip',
            'params': params,
            'remote': True,
            'command': cmd,
            'remote_workdir': remote_workdir,
            'timestamp': _get_local_now().isoformat()
        }

        # Execute remote test
        try:
            result = subprocess.run(cmd, cwd=self.source_dir, capture_output=True, text=True, timeout=7200)  # 2 hour timeout
            success = result.returncode == 0
            log_content = f"Remote HIP test command: {' '.join(cmd)}\n\nstdout:\n{result.stdout}\n\nstderr:\n{result.stderr}"

            # Try to extract SLURM job ID from output for tracking
            job_id_match = re.search(r'Job ID:\s+(\d+)', result.stdout)
            if job_id_match:
                test_config['slurm_job_id'] = job_id_match.group(1)

            run_id = self.log_manager.store_test_log(solver, 'hip', test_config, log_content, tags)
            return success, run_id

        except subprocess.TimeoutExpired:
            # Distinguish a timeout from other failures so the stored log is
            # actionable; 7200s mirrors the subprocess.run timeout above.
            logger.error(f"Remote HIP test timed out after 7200s: {' '.join(cmd)}")
            log_content = "Remote HIP test timed out after 7200 seconds"
            run_id = self.log_manager.store_test_log(solver, 'hip', test_config, log_content, tags)
            return False, run_id
        except Exception as e:
            # Best-effort recording: the failure is stored as a run so it
            # shows up in history rather than vanishing.
            logger.error(f"Remote HIP test failed: {e}")
            log_content = f"Remote HIP test failed: {str(e)}"
            run_id = self.log_manager.store_test_log(solver, 'hip', test_config, log_content, tags)
            return False, run_id
    
    def sync_remote_results(self, verbose: bool = False) -> bool:
        """
        Synchronize git repository and log files with remote server.

        Delegates to remote_test_hip_solvers.sh --sync, passing the configured
        git branch/remote names through environment variables, then ingests
        any newly synced logs into the database.

        Args:
            verbose: Show detailed output

        Returns:
            True if sync completed successfully
        """
        try:
            cfg = get_config()

            # Propagate git settings to the sync script via its environment.
            sync_env = os.environ.copy()
            for env_name, cfg_key, default in (
                ('GIT_REMOTE_BRANCH', 'remote.git_remote_branch', 'nccl'),
                ('GIT_LOCAL_BRANCH', 'remote.git_local_branch', 'nccl'),
                ('GIT_REMOTE_NAME', 'remote.git_remote_name', 'origin'),
                ('GIT_LOCAL_NAME', 'remote.git_local_name', 'local'),
            ):
                sync_env[env_name] = cfg.get(cfg_key, default)

            # Reuse the sync mode of the existing remote test script.
            sync_cmd = [str(self.source_dir / "remote_test_hip_solvers.sh"), "--sync"]
            if verbose:
                sync_cmd.append("--verbose")

            result = subprocess.run(sync_cmd, cwd=self.source_dir, capture_output=not verbose, text=True, env=sync_env)

            if result.returncode != 0:
                logger.error(f"Sync failed with exit code {result.returncode}")
                # stderr is only captured when not running verbose.
                if not verbose and result.stderr:
                    logger.error(f"Sync stderr: {result.stderr}")
                return False

            # Parse synced logs and update database
            self._process_synced_logs()
            return True

        except Exception as e:
            logger.error(f"Sync operation failed: {e}")
            return False
    
    def sync_remote_logs_only(self, verbose: bool = False) -> bool:
        """
        Sync only log files from remote server without git operations.

        Pulls hip_test_logs/*.log and logs/** from the configured host via
        rsync, then ingests the newly synced files into the database.

        Args:
            verbose: Show detailed output

        Returns:
            True if sync completed successfully
        """
        try:
            cfg = get_config()
            hostname = cfg.get('remote.hostname')
            remote_workdir = cfg.get('remote.workdir')

            if not all([hostname, remote_workdir]):
                logger.error("Incomplete remote configuration for log sync - need hostname and workdir")
                return False

            # Mirror the include/exclude pattern used by remote_test_hip_solvers.sh:
            # copy only the log directories, skip everything else. hostname is
            # an SSH-config alias, so no user@ prefix is needed.
            cmd = [
                'rsync', '-avz',
                '--include=hip_test_logs/', '--include=hip_test_logs/*.log',
                '--include=logs/', '--include=logs/**',
                '--exclude=*',
                f'{hostname}:{remote_workdir}/',
                './',
            ]

            result = subprocess.run(cmd, cwd=self.source_dir, capture_output=not verbose, text=True)

            if result.returncode != 0:
                logger.error(f"Log sync failed with exit code {result.returncode}")
                return False

            # Parse synced logs and update database
            self._process_synced_logs()
            return True

        except Exception as e:
            logger.error(f"Log sync operation failed: {e}")
            return False
    
    def _process_synced_logs(self) -> None:
        """
        Process newly synced log files and update database with results.

        Scans the known log locations (SLURM output, solver results, HIP test
        logs) and dispatches each matching file to its dedicated handler.
        """
        try:
            # (directory, glob pattern, handler) for every synced log type.
            scan_targets = (
                (self.source_dir / "logs" / "runlog", "scnet_*.out", self._process_slurm_log),
                (self.source_dir / "logs" / "results", "scnet_*.log", self._process_solver_result_log),
                (self.source_dir / "hip_test_logs", "hip_test_*_*.log", self._process_hip_test_log),
            )
            for directory, pattern, handler in scan_targets:
                if directory.exists():
                    for log_file in directory.glob(pattern):
                        handler(log_file)
        except Exception as e:
            logger.error(f"Failed to process synced logs: {e}")
    
    def _process_slurm_log(self, log_file: Path) -> None:
        """
        Process a SLURM output log file and extract job information.

        Matches the job ID embedded in the filename against stored runs and,
        on a match, appends the SLURM output to that run's log file.

        Args:
            log_file: Path to SLURM output file (named scnet_JOBID.out)
        """
        try:
            # Extract job ID from filename (scnet_JOBID.out)
            job_id = log_file.stem.split('_')[1]

            # Scan recent runs for one whose config references this job.
            db = self.log_manager.db
            existing_runs = db.get_latest(100)

            for run in existing_runs:
                if not run.get('config'):
                    continue
                config = run['config']
                if isinstance(config, str):
                    try:
                        config = json.loads(config)
                    except (json.JSONDecodeError, TypeError):
                        # Malformed config record - skip it but keep scanning.
                        continue

                if config.get('slurm_job_id') == job_id:
                    # Update existing run with SLURM output
                    log_content = log_file.read_text()

                    # Heuristic completion check based on the job script's
                    # "End time:" / "Error:" markers in the output.
                    success = "End time:" in log_content and "Error:" not in log_content

                    # Append the SLURM output to the run's existing log file.
                    existing_log_path = Path(run['log_path'])
                    if existing_log_path.exists():
                        current_content = existing_log_path.read_text()
                        combined_content = current_content + f"\n\n=== SLURM JOB OUTPUT (Job {job_id}) ===\n" + log_content
                        existing_log_path.write_text(combined_content)

                    logger.info(f"Updated run {run['id']} with SLURM job {job_id} output (success={success})")
                    break

        except Exception as e:
            logger.error(f"Failed to process SLURM log {log_file}: {e}")
    
    def _process_solver_result_log(self, log_file: Path) -> None:
        """
        Process a solver result log file from logs/results/.

        Extracts the solver name, final residual norm, and iteration count
        from the log text and stores the result as a new database entry.

        Args:
            log_file: Path to solver result log file
        """
        try:
            content = log_file.read_text()

            # Extract solver information from log content
            solver_match = re.search(r'Solver:\s+(\w+)', content)
            if not solver_match:
                # Not a recognizable solver result log; ignore silently.
                return

            solver = solver_match.group(1).lower()

            # Look for final residual norm. The float pattern accepts signed
            # exponents (e.g. 1.2e+05), which the previous character class
            # [0-9.eE-]+ truncated at the 'e'.
            norm_match = re.search(
                r'final.*residual.*norm[:\s]+([-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)',
                content, re.IGNORECASE)
            final_norm = norm_match.group(1) if norm_match else None

            # Look for iterations
            iter_match = re.search(r'iterations[:\s]+(\d+)', content, re.IGNORECASE)
            iterations = int(iter_match.group(1)) if iter_match else None

            # Create a database entry for this result
            config = {
                'solver': solver,
                'gpu': 'hip',
                'remote': True,
                'source_file': str(log_file),
                'final_norm': final_norm,
                'iterations': iterations,
                'timestamp': _get_local_now().isoformat()
            }

            # Store in database
            self.log_manager.store_test_log(solver, 'hip', config, content, ['remote', 'slurm'])
            logger.info(f"Processed solver result log: {log_file.name}")

        except Exception as e:
            logger.error(f"Failed to process solver result log {log_file}: {e}")
    
    def _process_hip_test_log(self, log_file: Path) -> None:
        """
        Process a HIP test log file.

        Derives the solver name from the filename and stores the log content
        as a processed test entry in the database.

        Args:
            log_file: Path to HIP test log file
        """
        try:
            content = log_file.read_text()

            # Extract solver from filename (hip_test_SOLVER_TIMESTAMP.log).
            # Everything between the "hip_test" prefix and the trailing
            # timestamp token is treated as the solver name, so underscored
            # names survive (hyphenated ones like ca-gcr are a single token).
            # NOTE(review): assumes the timestamp token itself contains no
            # underscore - confirm against the generating script's naming.
            parts = log_file.stem.split('_')
            if len(parts) < 3:
                return

            solver = '_'.join(parts[2:-1])

            # Store in database as processed test log
            record = {
                'solver': solver,
                'gpu': 'hip',
                'remote': True,
                'source_file': str(log_file),
                'log_type': 'hip_test',
                'timestamp': _get_local_now().isoformat()
            }

            self.log_manager.store_test_log(solver, 'hip', record, content, ['remote', 'hip_test'])
            logger.debug(f"Processed HIP test log: {log_file.name}")

        except Exception as e:
            logger.error(f"Failed to process HIP test log {log_file}: {e}")


class ProgressTracker(threading.Thread):
    """
    Thread-based progress tracker for SLURM jobs with adaptive time prediction.

    Polls `squeue` on a fixed interval and renders a console progress bar
    scaled by the predicted execution time, printing a final summary line
    once the job leaves the queue.
    """

    # Seconds between squeue polls.
    POLL_INTERVAL = 10

    def __init__(self, job_id: str, solver: str, gpu: str, predicted_time: int):
        """
        Args:
            job_id: SLURM job ID to track.
            solver: Solver name (informational).
            gpu: GPU backend name (informational).
            predicted_time: Predicted runtime in seconds (0 if unknown).
        """
        super().__init__(daemon=True)
        self.job_id = job_id
        self.solver = solver
        self.gpu = gpu
        self.predicted_time = predicted_time
        self.start_time = time.time()
        self.running = True
        self.job_completed = False

    def stop(self):
        """Stop the progress tracker."""
        self.running = False

    @staticmethod
    def _format_duration(seconds: int) -> str:
        """Render a duration in seconds as 'Xm Ys' (shared by all displays)."""
        return f"{seconds // 60}m {seconds % 60}s"

    def run(self):
        """Main progress tracking loop."""
        print(f"🎯 Predicted execution time: {self._format_duration(self.predicted_time)}")

        # Initial delay to let job start
        time.sleep(5)

        while self.running and not self.job_completed:
            elapsed = int(time.time() - self.start_time)

            # Check job status
            job_status = self._check_slurm_job_status()

            if job_status in ('COMPLETED', 'FAILED', 'CANCELLED', 'TIMEOUT'):
                self.job_completed = True
                self._show_final_status(job_status, elapsed)
                break
            elif job_status == 'RUNNING':
                self._show_progress_bar(elapsed)
            elif job_status == 'PENDING':
                self._show_pending_status(elapsed)

            time.sleep(self.POLL_INTERVAL)

    def _check_slurm_job_status(self) -> str:
        """
        Check current SLURM job status via squeue.

        Returns the raw squeue state string, 'COMPLETED' when the job is no
        longer listed in the queue, or 'UNKNOWN' if squeue cannot be queried.
        """
        try:
            result = subprocess.run(
                ['squeue', '-h', '-j', self.job_id, '-o', '%T'],
                capture_output=True, text=True, timeout=10
            )
            if result.returncode == 0 and result.stdout.strip():
                return result.stdout.strip()
            else:
                # Job might be completed and no longer in queue
                return 'COMPLETED'
        except Exception:
            return 'UNKNOWN'

    def _show_progress_bar(self, elapsed: int):
        """Show progress bar for running job."""
        if self.predicted_time > 0:
            progress = min(elapsed / self.predicted_time, 1.0)
        else:
            progress = 0.5  # No prediction available: show a half-full bar

        bar_width = 30
        filled = int(bar_width * progress)
        bar = '█' * filled + '░' * (bar_width - filled)

        remaining = max(0, self.predicted_time - elapsed)
        print(f"\r🏃 [{bar}] {progress*100:5.1f}% | ⏱️  {self._format_duration(elapsed)} | "
              f"⏳ ~{self._format_duration(remaining)} remaining", end='', flush=True)

    def _show_pending_status(self, elapsed: int):
        """Show status for pending job."""
        print(f"\r⏸️  Job pending in queue... ⏱️  {self._format_duration(elapsed)}", end='', flush=True)

    def _show_final_status(self, status: str, elapsed: int):
        """Show final job status."""
        elapsed_str = self._format_duration(elapsed)
        print()  # New line after progress bar

        # One message per known terminal state; anything else falls through
        # to a generic summary line.
        messages = {
            'COMPLETED': f"✅ SLURM job {self.job_id} completed successfully in {elapsed_str}",
            'FAILED': f"❌ SLURM job {self.job_id} failed after {elapsed_str}",
            'CANCELLED': f"🛑 SLURM job {self.job_id} was cancelled after {elapsed_str}",
            'TIMEOUT': f"⏰ SLURM job {self.job_id} timed out after {elapsed_str}",
        }
        print(messages.get(status, f"🏁 SLURM job {self.job_id} finished with status {status} after {elapsed_str}"))