"""Usage Examples for SSH Slurm MCP Server"""

import asyncio
from fastmcp import Client
import sys
import os

# Add the src directory to the path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from server import mcp, initialize_managers


async def example_basic_workflow():
    """Example: Basic SSH connection and code compilation workflow.

    Demonstrates the connect -> compiler check -> upload -> compile -> execute
    sequence with mock parameters; real credentials are needed for the calls
    to actually succeed.
    """
    print("=== Basic Workflow Example ===")
    
    async with Client(mcp) as client:
        # List the tools registered on the in-process MCP server
        tools = await client.list_tools()
        print(f"Available tools: {[tool.name for tool in tools]}")
        
        # Bind the mock connection id once, matching the other examples
        connection_id = "researcher@hpc-cluster.example.com:22"
        
        # Example connection (would need real credentials)
        print("\nExample tool calls (with mock parameters):")
        
        # 1. Connect to SSH server
        print("1. Connecting to SSH server...")
        connection_result = await client.call_tool("connect_ssh", {
            "host": "hpc-cluster.example.com",
            "username": "researcher",
            "key_path": "/home/user/.ssh/id_rsa"
        })
        print(f"Connection result: {connection_result.text}")
        
        # 2. Check compiler availability (empty "compiler" presumably means
        #    "report all" — TODO confirm against the server's tool contract)
        print("\n2. Checking compiler availability...")
        compiler_info = await client.call_tool("get_compiler_info", {
            "connection_id": connection_id,
            "compiler": ""
        })
        print(f"Compiler info: {compiler_info.text}")
        
        # 3. Upload source code
        print("\n3. Uploading source code...")
        upload_result = await client.call_tool("upload_file", {
            "connection_id": connection_id,
            "local_path": "./hello.c",
            "remote_path": "hello.c"
        })
        print(f"Upload result: {upload_result.text}")
        
        # 4. Compile code
        print("\n4. Compiling code...")
        compile_result = await client.call_tool("compile_code", {
            "connection_id": connection_id,
            "source_file": "hello.c",
            "compiler": "gcc",
            "output_file": "hello",
            "compile_flags": "-O2 -Wall"
        })
        print(f"Compile result: {compile_result.text}")
        
        # 5. Execute program
        print("\n5. Executing program...")
        execute_result = await client.call_tool("execute_program", {
            "connection_id": connection_id,
            "program_path": "./hello",
            "arguments": ""
        })
        print(f"Execute result: {execute_result.text}")


async def example_slurm_workflow():
    """Example: Slurm job submission and monitoring workflow.

    Submits an inline batch script, queries its status, then polls until
    completion.  The job id is mocked; a real run would parse it from the
    submit result.
    """
    print("\n=== Slurm Workflow Example ===")
    
    async with Client(mcp) as client:
        connection_id = "researcher@hpc-cluster.example.com:22"
        
        # Example Slurm job script (passed verbatim to sbatch by the server)
        job_script = """#!/bin/bash
echo "Job started at: $(date)"
echo "Running on node: $(hostname)"
echo "Current directory: $(pwd)"

# Simulate computational work
echo "Starting simulation..."
sleep 30
echo "Simulation phase 1 completed"
sleep 30  
echo "Simulation phase 2 completed"

echo "Job completed at: $(date)"
"""
        
        print("1. Submitting Slurm job...")
        submit_result = await client.call_tool("submit_slurm_job", {
            "connection_id": connection_id,
            "script_content": job_script,
            "job_name": "test_simulation",
            "partition": "compute",
            "nodes": 1,
            "cpus_per_task": 4,
            "memory": "8G",
            "time_limit": "00:05:00"
        })
        print(f"Submit result: {submit_result.text}")
        
        # Extract job ID (in real scenario this comes from submit_result)
        job_id = "12345"  # Mock job ID
        
        print("\n2. Checking job status...")
        status_result = await client.call_tool("check_slurm_job_status", {
            "connection_id": connection_id,
            "job_id": job_id
        })
        print(f"Status result: {status_result.text}")
        
        print("\n3. Waiting for job completion...")
        wait_result = await client.call_tool("wait_for_job_completion", {
            "connection_id": connection_id,
            "job_id": job_id,
            "check_interval": 10,
            "timeout": 300
        })
        print(f"Wait result: {wait_result.text}")


async def example_file_operations():
    """Example: File transfer and management operations.

    Shows directory listing with a glob pattern, an upload, and a download,
    all against a mock connection.
    """
    print("\n=== File Operations Example ===")
    
    async with Client(mcp) as client:
        connection_id = "researcher@hpc-cluster.example.com:22"
        
        print("1. Listing remote files...")
        list_result = await client.call_tool("list_remote_files", {
            "connection_id": connection_id,
            "directory": "~/projects",
            "pattern": "*.py"
        })
        print(f"List result: {list_result.text}")
        
        print("\n2. Uploading configuration file...")
        upload_result = await client.call_tool("upload_file", {
            "connection_id": connection_id,
            "local_path": "./config.json",
            "remote_path": "~/projects/config.json"
        })
        print(f"Upload result: {upload_result.text}")
        
        print("\n3. Downloading results...")
        download_result = await client.call_tool("download_file", {
            "connection_id": connection_id,
            "remote_path": "~/projects/results.txt",
            "local_path": "./downloaded_results.txt"
        })
        print(f"Download result: {download_result.text}")


async def example_multi_compiler_workflow():
    """Example: Testing different compilers and languages.

    Exercises the compile_code tool with gcc (C), hipcc (GPU C++), and the
    python "compiler" (bytecode), then runs a Python script via
    execute_program.
    """
    print("\n=== Multi-Compiler Workflow Example ===")
    
    async with Client(mcp) as client:
        connection_id = "researcher@hpc-cluster.example.com:22"
        
        # Test C compilation with GCC
        print("1. Testing C compilation with GCC...")
        compile_c = await client.call_tool("compile_code", {
            "connection_id": connection_id,
            "source_file": "matrix_multiply.c",
            "compiler": "gcc",
            "output_file": "matrix_multiply_gcc",
            "compile_flags": "-O3 -fopenmp"
        })
        print(f"GCC compile result: {compile_c.text}")
        
        # Test C++ compilation with HIPCC for GPU
        print("\n2. Testing C++ compilation with HIPCC for GPU...")
        compile_hip = await client.call_tool("compile_code", {
            "connection_id": connection_id,
            "source_file": "gpu_kernel.cpp",
            "compiler": "hipcc",
            "output_file": "gpu_kernel",
            "compile_flags": "-O3 -DGPU_KERNEL"
        })
        print(f"HIPCC compile result: {compile_hip.text}")
        
        # Test Python script compilation (no compile_flags needed)
        print("\n3. Testing Python script compilation...")
        compile_python = await client.call_tool("compile_code", {
            "connection_id": connection_id,
            "source_file": "data_analysis.py",
            "compiler": "python",
            "output_file": "data_analysis.pyc"
        })
        print(f"Python compile result: {compile_python.text}")
        
        # Execute Python script with command-line arguments
        print("\n4. Executing Python script...")
        execute_python = await client.call_tool("execute_program", {
            "connection_id": connection_id,
            "program_path": "data_analysis.py",
            "arguments": "--input dataset.csv --output analysis.json"
        })
        print(f"Python execute result: {execute_python.text}")


async def example_hpc_pipeline():
    """Example: Complete HPC development pipeline.

    Three phases: (1) upload/compile/test the code interactively,
    (2) submit a production Slurm job, (3) monitor it and download results.
    Intermediate result objects are bound but not printed; the job id is
    mocked rather than parsed from the submit result.
    """
    print("\n=== HPC Development Pipeline Example ===")
    
    async with Client(mcp) as client:
        connection_id = "researcher@hpc-cluster.example.com:22"
        
        # 1. Development phase
        print("Phase 1: Development and Testing")
        
        # Upload and compile code
        upload_result = await client.call_tool("upload_file", {
            "connection_id": connection_id,
            "local_path": "./simulation.cpp",
            "remote_path": "simulation.cpp"
        })
        
        compile_result = await client.call_tool("compile_code", {
            "connection_id": connection_id,
            "source_file": "simulation.cpp",
            "compiler": "hipcc",
            "output_file": "simulation",
            "compile_flags": "-O3 -DPARALLEL"
        })
        
        # Quick test run before committing cluster resources
        test_result = await client.call_tool("execute_program", {
            "connection_id": connection_id,
            "program_path": "./simulation",
            "arguments": "--test --quick"
        })
        
        print("\nPhase 2: Production Job Submission")
        
        # 2. Production phase - submit to Slurm
        production_script = """#!/bin/bash
echo "Starting production simulation at $(date)"

# Load required modules
module load hipcc
module load openmpi

# Set environment variables
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export HIP_VISIBLE_DEVICES=0,1

# Run simulation
./simulation --input large_dataset.dat --output results_$SLURM_JOB_ID.hdf5

echo "Production simulation completed at $(date)"
"""
        
        job_result = await client.call_tool("submit_slurm_job", {
            "connection_id": connection_id,
            "script_content": production_script,
            "job_name": "production_sim",
            "partition": "gpu",
            "nodes": 2,
            "cpus_per_task": 16,
            "memory": "64G",
            "time_limit": "12:00:00"
        })
        
        print("\nPhase 3: Monitoring and Results")
        
        # Extract job ID and monitor (mock; a real run would parse job_result)
        job_id = "67890"
        
        status_result = await client.call_tool("check_slurm_job_status", {
            "connection_id": connection_id,
            "job_id": job_id
        })
        
        # Download results when complete; remote name matches the
        # results_$SLURM_JOB_ID.hdf5 written by the production script
        download_result = await client.call_tool("download_file", {
            "connection_id": connection_id,
            "remote_path": f"results_{job_id}.hdf5",
            "local_path": f"./production_results_{job_id}.hdf5"
        })
        
        print("\nPipeline completed successfully!")


async def main():
    """Run all examples in sequence.

    All examples use mock credentials, so with no real cluster available the
    first failing call aborts the sequence and is reported here.
    """
    print("SSH Slurm MCP Server - Usage Examples")
    print("=====================================")
    
    # Initialize managers for testing
    initialize_managers()
    
    try:
        await example_basic_workflow()
        await example_slurm_workflow()
        await example_file_operations()
        await example_multi_compiler_workflow()
        await example_hpc_pipeline()
        
    except Exception as e:
        # Broad catch is deliberate: any example may fail on mock credentials
        print(f"Example execution error: {e}")
        print("Note: These examples use mock parameters and would require real SSH credentials to function.")
    
    print("\n=== Examples completed ===")
    print("\nNote: All examples use mock credentials and server addresses.")
    print("To use with real servers, replace with actual connection details.")


# Script entry point: run every example inside a fresh asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
