"""Slurm Job Management Module"""

import asyncio
import logging
import re
import shlex
import time
from typing import Any, Dict, List, Optional, Tuple

from .ssh_manager import SSHManager

# Module-level logger; handlers and level are configured by the application.
logger = logging.getLogger(__name__)


class SlurmManager:
    """Manages Slurm job operations on remote servers.

    All cluster interaction goes through an ``SSHManager``; metadata for
    each job submitted through this instance is cached in ``job_cache``,
    keyed by the Slurm job ID string.
    """

    # Slurm states after which a job will make no further progress.
    _TERMINAL_STATES = ('COMPLETED', 'FAILED', 'CANCELLED', 'TIMEOUT', 'NODE_FAIL')

    def __init__(self, ssh_manager: "SSHManager"):
        # ssh_manager must provide execute_command() and get_client();
        # get_client() is assumed to return a paramiko-style SSHClient
        # (exec_command returning stdin/stdout/stderr) -- TODO confirm.
        self.ssh_manager = ssh_manager
        # job_id -> submission metadata (name, resources, script path, ...)
        self.job_cache: Dict[str, Dict[str, Any]] = {}

    def submit_job(self, connection_id: str, script_content: str, job_name: str,
                   partition: str = "default", nodes: int = 1, cpus_per_task: int = 1,
                   memory: str = "4G", time_limit: str = "01:00:00") -> str:
        """Submit a Slurm batch job.

        Args:
            connection_id: SSH connection identifier
            script_content: Job script content
            job_name: Name for the job
            partition: Slurm partition to use
            nodes: Number of nodes requested
            cpus_per_task: CPUs per task
            memory: Memory allocation (e.g., "4G")
            time_limit: Time limit (HH:MM:SS format)

        Returns:
            Job ID as string

        Raises:
            Exception: If job submission fails
        """
        try:
            sbatch_script = self._generate_sbatch_script(
                script_content, job_name, partition, nodes, cpus_per_task, memory, time_limit
            )

            # Sanitize the job name for use in a filename so path separators
            # or shell metacharacters in job_name cannot break the commands
            # below, and quote the path for the remote shell.
            safe_name = re.sub(r'[^A-Za-z0-9_.-]', '_', job_name)
            script_filename = f"/tmp/job_{safe_name}_{int(time.time())}.sh"
            quoted_path = shlex.quote(script_filename)

            # Stream the script through the SSH channel's stdin, then wait
            # for `cat` to exit so the file is fully written before sbatch
            # runs. (Previously the upload neither closed the write side of
            # the channel nor waited, and an earlier stray
            # `execute_command("cat > file")` with no stdin could hang or
            # truncate the file.)
            client = self.ssh_manager.get_client(connection_id)
            stdin, stdout, _stderr = client.exec_command(f"cat > {quoted_path}")
            stdin.write(sbatch_script)
            stdin.flush()
            stdin.channel.shutdown_write()
            stdin.close()
            stdout.channel.recv_exit_status()  # block until cat finishes

            # Make the script executable.
            self.ssh_manager.execute_command(
                connection_id, f"chmod +x {quoted_path}", working_dir="~"
            )

            # Submit the job.
            exit_code, sbatch_out, sbatch_err = self.ssh_manager.execute_command(
                connection_id, f"sbatch {quoted_path}", working_dir="~"
            )
            if exit_code != 0:
                raise Exception(f"sbatch failed: {sbatch_err}")

            job_id = self._extract_job_id(sbatch_out)

            # Cache the submission parameters so status queries can report
            # them even after the job leaves squeue.
            self.job_cache[job_id] = {
                'job_name': job_name,
                'connection_id': connection_id,
                'script_file': script_filename,
                'submitted_at': time.time(),
                'partition': partition,
                'nodes': nodes,
                'cpus_per_task': cpus_per_task,
                'memory': memory,
                'time_limit': time_limit
            }

            logger.info(f"Submitted Slurm job {job_id} ({job_name}) on {connection_id}")
            return job_id

        except Exception as e:
            logger.error(f"Error submitting Slurm job: {str(e)}")
            raise Exception(f"Job submission failed: {str(e)}")

    def check_job_status(self, connection_id: str, job_id: str) -> Dict[str, Any]:
        """Check status of a specific Slurm job.

        Queries squeue first (pending/running jobs), then falls back to
        sacct (finished jobs), and finally enriches the result with any
        locally cached submission parameters.

        Args:
            connection_id: SSH connection identifier
            job_id: Slurm job ID

        Returns:
            Job status information dictionary; ``status`` is ``NOT_FOUND``
            when neither squeue nor sacct knows the job.

        Raises:
            Exception: If status check fails
        """
        try:
            quoted_id = shlex.quote(job_id)

            # squeue covers jobs that are still pending or running.
            exit_code, stdout, stderr = self.ssh_manager.execute_command(
                connection_id,
                f"squeue -j {quoted_id} --format='%.18i %.9P %.8j %.8u %.8T %.10M %.6D %R'",
                working_dir="~"
            )

            job_info: Dict[str, Any] = {'job_id': job_id, 'status': 'NOT_FOUND'}

            if exit_code == 0 and stdout.strip():
                lines = stdout.strip().split('\n')
                if len(lines) > 1:  # first line is the header
                    fields = lines[1].strip().split()
                    # Field order follows the --format string above:
                    # jobid, partition, name, user, state, time, nodes, reason.
                    if len(fields) >= 5:
                        job_info.update({
                            'status': fields[4],
                            'partition': fields[1],
                            'job_name': fields[2],
                            'user': fields[3],
                            'time_used': fields[5] if len(fields) > 5 else '',
                            'nodes': fields[6] if len(fields) > 6 else '',
                            'reason': ' '.join(fields[7:]) if len(fields) > 7 else ''
                        })
                        logger.info(f"Job {job_id} status: {job_info['status']}")
                        return job_info

            # Not in squeue: query sacct for completed jobs.
            exit_code, stdout, stderr = self.ssh_manager.execute_command(
                connection_id,
                f"sacct -j {quoted_id} --format=JobID,JobName,State,ExitCode,Elapsed --parsable2 --noheader",
                working_dir="~"
            )

            if exit_code == 0 and stdout.strip():
                for line in stdout.strip().split('\n'):
                    if not line:
                        continue
                    parts = line.split('|')
                    # Skip the .batch/.extern sub-step rows; we want the
                    # parent job record. The sub-step suffix appears in the
                    # JobID field (parts[0]), not at the end of the line --
                    # the previous whole-line endswith() check never matched.
                    if parts[0].endswith(('.batch', '.extern')):
                        continue
                    if len(parts) >= 3:
                        job_info.update({
                            'status': parts[2],
                            'job_name': parts[1],
                            'exit_code': parts[3] if len(parts) > 3 else '',
                            'elapsed_time': parts[4] if len(parts) > 4 else ''
                        })
                        break

            # Enrich with cached submission parameters when available.
            if job_id in self.job_cache:
                cached = self.job_cache[job_id]
                job_info.update({
                    'partition': cached.get('partition', ''),
                    'nodes': cached.get('nodes', ''),
                    'cpus_per_task': cached.get('cpus_per_task', ''),
                    'memory': cached.get('memory', ''),
                    'time_limit': cached.get('time_limit', ''),
                    'submitted_at': cached.get('submitted_at', '')
                })

            logger.info(f"Retrieved status for job {job_id}: {job_info.get('status', 'UNKNOWN')}")
            return job_info

        except Exception as e:
            logger.error(f"Error checking job status: {str(e)}")
            raise Exception(f"Job status check failed: {str(e)}")

    async def wait_for_job_completion(self, connection_id: str, job_id: str,
                                    check_interval: int = 30, timeout: int = 3600) -> Dict[str, Any]:
        """Wait for a Slurm job to complete with timeout.

        Polls :meth:`check_job_status` every ``check_interval`` seconds
        until the job reaches a terminal state or ``timeout`` elapses.

        Args:
            connection_id: SSH connection identifier
            job_id: Slurm job ID
            check_interval: Time between status checks (seconds)
            timeout: Maximum wait time (seconds)

        Returns:
            Final job status and output information

        Raises:
            Exception: If wait fails or times out
        """
        start_time = time.time()

        try:
            while time.time() - start_time < timeout:
                job_info = self.check_job_status(connection_id, job_id)
                status = job_info.get('status', 'UNKNOWN')

                if status in self._TERMINAL_STATES:
                    # Attach the tail of the job's stdout/stderr files.
                    job_info.update(self._get_job_output(connection_id, job_id))
                    logger.info(f"Job {job_id} completed with status: {status}")
                    return job_info

                await asyncio.sleep(check_interval)

            # Timeout reached (re-wrapped by the handler below, preserving
            # the original "Job wait failed: ..." message format).
            raise Exception(f"Job {job_id} did not complete within {timeout} seconds")

        except Exception as e:
            logger.error(f"Error waiting for job completion: {str(e)}")
            raise Exception(f"Job wait failed: {str(e)}")

    def cancel_job(self, connection_id: str, job_id: str) -> bool:
        """Cancel a Slurm job via scancel.

        Args:
            connection_id: SSH connection identifier
            job_id: Slurm job ID

        Returns:
            True if successful, False otherwise (never raises).
        """
        try:
            exit_code, stdout, stderr = self.ssh_manager.execute_command(
                connection_id, f"scancel {shlex.quote(job_id)}", working_dir="~"
            )

            if exit_code == 0:
                logger.info(f"Cancelled job {job_id}")
                return True

            logger.error(f"Failed to cancel job {job_id}: {stderr}")
            return False

        except Exception as e:
            logger.error(f"Error cancelling job: {str(e)}")
            return False

    def _generate_sbatch_script(self, script_content: str, job_name: str,
                               partition: str, nodes: int, cpus_per_task: int,
                               memory: str, time_limit: str) -> str:
        """Generate an SBATCH script with proper directives.

        Args:
            script_content: User script content
            job_name: Job name
            partition: Slurm partition
            nodes: Number of nodes
            cpus_per_task: CPUs per task
            memory: Memory allocation
            time_limit: Time limit

        Returns:
            Complete SBATCH script (preamble + user script + footer)
        """
        # %j expands to the job ID, so output files are
        # {job_name}_{job_id}.out/.err -- _get_job_output relies on this.
        sbatch_directives = f"""#!/bin/bash
#SBATCH --job-name={job_name}
#SBATCH --partition={partition}
#SBATCH --nodes={nodes}
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={cpus_per_task}
#SBATCH --mem={memory}
#SBATCH --time={time_limit}
#SBATCH --output={job_name}_%j.out
#SBATCH --error={job_name}_%j.err

# Set error handling
set -e

# Job execution starts here
echo "Job started at: $(date)"
echo "Running on node: $(hostname)"
echo "Job ID: $SLURM_JOB_ID"
echo "Working directory: $(pwd)"
echo "================================"

"""

        return sbatch_directives + script_content + "\n\necho \"Job completed at: $(date)\"\n"

    def _extract_job_id(self, sbatch_output: str) -> str:
        """Extract the job ID from sbatch output.

        Args:
            sbatch_output: Output from the sbatch command

        Returns:
            Job ID as string

        Raises:
            Exception: If job ID cannot be extracted
        """
        # Standard sbatch output: "Submitted batch job 12345"
        match = re.search(r'Submitted batch job (\d+)', sbatch_output)
        if match:
            return match.group(1)

        # Fallback for non-standard wrappers that still mention "job <id>".
        match = re.search(r'job (\d+)', sbatch_output)
        if match:
            return match.group(1)

        raise Exception(f"Could not extract job ID from sbatch output: {sbatch_output}")

    def _get_job_output(self, connection_id: str, job_id: str) -> Dict[str, str]:
        """Get the tail of a job's output and error files.

        Best-effort: missing files or read errors leave the corresponding
        entry empty rather than raising.

        Args:
            connection_id: SSH connection identifier
            job_id: Slurm job ID

        Returns:
            Dictionary with 'stdout' and 'stderr' content (last 20 lines each)
        """
        output_info = {'stdout': '', 'stderr': ''}

        try:
            # File names follow the --output/--error directives written by
            # _generate_sbatch_script: {job_name}_{job_id}.{out,err}.
            job_name = job_id
            if job_id in self.job_cache:
                job_name = self.job_cache[job_id].get('job_name', job_id)

            for key, suffix in (('stdout', 'out'), ('stderr', 'err')):
                path = shlex.quote(f"{job_name}_{job_id}.{suffix}")
                try:
                    exit_code, stdout, stderr = self.ssh_manager.execute_command(
                        connection_id, f"tail -n 20 {path}", working_dir="~"
                    )
                    if exit_code == 0:
                        output_info[key] = stdout
                except Exception:  # narrow from bare except: best-effort read
                    pass

        except Exception as e:
            logger.warning(f"Could not retrieve job output for {job_id}: {str(e)}")

        return output_info

    def list_jobs(self, connection_id: str, user: Optional[str] = None) -> List[Dict[str, Any]]:
        """List Slurm jobs for a user.

        Args:
            connection_id: SSH connection identifier
            user: Username filter (None for all jobs visible to squeue)

        Returns:
            List of job information dictionaries (empty on error).
        """
        try:
            cmd = "squeue --format='%.18i %.9P %.8j %.8u %.8T %.10M %.6D %R'"
            if user:
                cmd += f" -u {shlex.quote(user)}"

            exit_code, stdout, stderr = self.ssh_manager.execute_command(
                connection_id, cmd, working_dir="~"
            )

            jobs: List[Dict[str, Any]] = []
            if exit_code == 0 and stdout.strip():
                lines = stdout.strip().split('\n')
                for line in lines[1:]:  # first line is the header
                    parts = line.strip().split()
                    if len(parts) >= 5:
                        jobs.append({
                            'job_id': parts[0],
                            'partition': parts[1],
                            'job_name': parts[2],
                            'user': parts[3],
                            'status': parts[4],
                            'time_used': parts[5] if len(parts) > 5 else '',
                            'nodes': parts[6] if len(parts) > 6 else '',
                            'reason': ' '.join(parts[7:]) if len(parts) > 7 else ''
                        })

            logger.info(f"Listed {len(jobs)} jobs for user on {connection_id}")
            return jobs

        except Exception as e:
            logger.error(f"Error listing jobs: {str(e)}")
            return []
