"""
Analysis endpoints for CodeMCP API.

Provides endpoints for code analysis, exploration, and call tree generation.
"""

import asyncio
import json
import uuid
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Query
from fastapi.responses import StreamingResponse

from ...core.config import Config
from ...core.error_handler import handle_error, log_info, log_warning
from ...gateway.load_balancer import LoadBalancer
from ...gateway.session import SessionManager
from ..models.requests import (
    AnalysisRequest,
    ExplorationRequest,
    FunctionInfoRequest,
    CallChainRequest,
    BatchAnalysisRequest,
    NodeSelectionRequest,
    CodeViewRequest,
    ExportRequest,
    DependencyAnalysisRequest,
    ValidateFileRequest,
    SearchRequest
)
from ..models.responses import (
    AnalysisResponse,
    ExplorationResponse,
    FunctionInfoResponse,
    CallChainResponse,
    BatchAnalysisResponse,
    CodeViewResponse,
    DependencyAnalysisResponse,
    ValidationResponse,
    SearchResponse,
    ExportResponse,
    ErrorResponse
)

router = APIRouter(prefix="/analysis", tags=["analysis"])

# Dependency providers. These are placeholders: the real application is
# expected to override them (e.g. via FastAPI's dependency_overrides) with
# properly constructed singletons.
def get_config() -> Config:
    """Return the application configuration (placeholder implementation)."""
    # This would be properly injected in the main application.
    return Config()

def get_load_balancer() -> LoadBalancer:
    """Return the shared load balancer (must be overridden by the app)."""
    # Fail fast instead of silently returning None, which previously surfaced
    # later as a confusing AttributeError inside an endpoint handler.
    raise RuntimeError("get_load_balancer dependency was not overridden by the application")

def get_session_manager() -> SessionManager:
    """Return the shared session manager (must be overridden by the app)."""
    # Fail fast instead of silently returning None (see get_load_balancer).
    raise RuntimeError("get_session_manager dependency was not overridden by the application")


@router.post("/analyze", response_model=AnalysisResponse)
async def analyze_code(
    request: AnalysisRequest,
    background_tasks: BackgroundTasks,
    config: Config = Depends(get_config),
    load_balancer: LoadBalancer = Depends(get_load_balancer),
    session_manager: SessionManager = Depends(get_session_manager)
) -> AnalysisResponse:
    """
    Analyze code and generate call tree.

    This endpoint analyzes the specified file and function to generate
    a comprehensive call tree with optional cross-language support.

    Raises:
        HTTPException: 404 if the referenced session does not exist,
            503 if no suitable MCP instance is available,
            400 if the file cannot be validated or its language detected,
            500 if the analysis itself fails.
    """
    request_id = str(uuid.uuid4())
    # Timezone-aware clock; datetime.utcnow() is deprecated and naive.
    start_time = datetime.now(timezone.utc)

    try:
        log_info(f"Starting analysis request {request_id}", {
            "file_path": request.file_path,
            "function_name": request.function_name,
            "mode": request.mode,
            "language": request.language
        })

        # Reuse the caller's session when given, otherwise create a fresh one.
        if request.session_id:
            session = await session_manager.get_session(request.session_id)
            if not session:
                raise HTTPException(status_code=404, detail="Session not found")
        else:
            session = await session_manager.create_session(
                session_type="analysis",
                configuration={
                    "mode": request.mode,
                    "max_depth": request.max_depth,
                    "cross_language": request.cross_language
                }
            )
            request.session_id = session.session_id

        # Detect language if not specified.
        if not request.language:
            # Any instance can perform language detection.
            instance = await load_balancer.select_instance()
            if not instance:
                raise HTTPException(status_code=503, detail="No MCP instances available")

            # Call validation endpoint to detect language.
            validation_request = {
                "tool": "validate_file",
                "parameters": {
                    "file_path": request.file_path,
                    "check_syntax": False
                }
            }

            success, response = await load_balancer.execute_request(instance, validation_request)
            if not success:
                raise HTTPException(status_code=400, detail=f"File validation failed: {response.get('error')}")

            detected_language = response.get("result", {}).get("language")
            if not detected_language:
                raise HTTPException(status_code=400, detail="Could not detect file language")

            request.language = detected_language

        # BUG FIX: request.language is an enum when supplied by the client but
        # a plain string after detection (assigned above); calling .value
        # unconditionally raised AttributeError in the detection path.
        language_value = getattr(request.language, "value", request.language)

        # Select MCP instance for the detected/specified language.
        instance = await load_balancer.select_instance(language=language_value)
        if not instance:
            raise HTTPException(
                status_code=503,
                detail=f"No MCP instances available for language: {request.language}"
            )

        # Build analysis request for MCP.
        mcp_request = {
            "tool": "build_call_tree",
            "parameters": {
                "entry_point": f"{request.file_path}::{request.function_name}" if request.function_name else request.file_path,
                "max_depth": request.max_depth,
                "include_dependencies": request.include_dependencies,
                "cross_language": request.cross_language,
                "session_id": request.session_id
            }
        }

        # Execute analysis.
        success, mcp_response = await load_balancer.execute_request(instance, mcp_request)

        if not success:
            raise HTTPException(
                status_code=500,
                detail=f"Analysis failed: {mcp_response.get('error', 'Unknown error')}"
            )

        # Process analysis results.
        call_tree_data = mcp_response.get("result", {})

        # If cross-language analysis is enabled, collect results from the
        # other MCPs in the background so this response isn't delayed.
        if request.cross_language and call_tree_data.get("cross_language_calls"):
            background_tasks.add_task(
                _process_cross_language_calls,
                call_tree_data,
                load_balancer,
                session_manager,
                request.session_id
            )

        # Calculate processing time.
        processing_time = (datetime.now(timezone.utc) - start_time).total_seconds()

        # Build response.
        response = AnalysisResponse(
            request_id=request_id,
            processing_time=processing_time,
            call_tree=call_tree_data,
            analysis_metadata={
                "mode": request.mode,
                "max_depth": request.max_depth,
                "cross_language": request.cross_language,
                "instance_id": instance.id,
                "language": request.language
            },
            session_id=request.session_id,
            language_detected=request.language,
            file_info={
                "file_path": request.file_path,
                "function_name": request.function_name
            }
        )

        # Cache results in the background if enabled.
        if request.cache_results:
            background_tasks.add_task(
                _cache_analysis_results,
                request,
                response,
                session_manager
            )

        log_info(f"Analysis request {request_id} completed successfully")
        return response

    except HTTPException:
        raise
    except Exception as e:
        error_response = handle_error(e, {
            "request_id": request_id,
            "file_path": request.file_path,
            "function_name": request.function_name
        })
        raise HTTPException(status_code=500, detail=error_response)


@router.post("/explore", response_model=ExplorationResponse)
async def explore_interactive(
    request: ExplorationRequest,
    session_manager: SessionManager = Depends(get_session_manager),
    load_balancer: LoadBalancer = Depends(get_load_balancer)
) -> ExplorationResponse:
    """
    Handle interactive exploration actions.

    Supports manual mode exploration with node selection and navigation.
    """
    request_id = str(uuid.uuid4())

    try:
        # The exploration session carries all navigation state between calls.
        session = await session_manager.get_session(request.session_id)
        if not session:
            raise HTTPException(status_code=404, detail="Session not found")

        state = session.get_data("exploration_state", {})

        # Dispatch the requested action to its handler.
        if request.action == "select_nodes":
            outcome = await _handle_node_selection(request, state, load_balancer, session_manager)
        elif request.action == "navigate_back":
            outcome = await _handle_navigation_back(request, state, session_manager)
        elif request.action == "get_candidates":
            outcome = await _handle_get_candidates(request, state, load_balancer)
        elif request.action == "view_code":
            outcome = await _handle_view_code(request, state, load_balancer)
        else:
            raise HTTPException(status_code=400, detail=f"Unknown action: {request.action}")

        # Persist whatever state the handler produced.
        await session_manager.update_session_data(
            request.session_id,
            "exploration_state",
            outcome["updated_state"]
        )

        return ExplorationResponse(
            request_id=request_id,
            action_result=outcome["action_result"],
            current_node=outcome["current_node"],
            available_actions=outcome["available_actions"],
            candidate_nodes=outcome.get("candidate_nodes", []),
            navigation_stack=outcome.get("navigation_stack", []),
            session_info={"session_id": request.session_id}
        )

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=handle_error(e, {
            "request_id": request_id,
            "session_id": request.session_id,
            "action": request.action
        }))


@router.get("/function/{function_name}", response_model=FunctionInfoResponse)
async def get_function_info(
    function_name: str,
    file_path: str = Query(..., description="File path"),
    language: Optional[str] = Query(None, description="Language override"),
    include_callers: bool = Query(False, description="Include caller information"),
    include_callees: bool = Query(False, description="Include callee information"),
    include_source: bool = Query(False, description="Include source code"),
    load_balancer: LoadBalancer = Depends(get_load_balancer)
) -> FunctionInfoResponse:
    """
    Get detailed information about a specific function.

    Returns function metadata, callers, callees, and optionally source code.
    """
    request_id = str(uuid.uuid4())

    try:
        # Route to an MCP instance that can serve the (optional) language.
        instance = await load_balancer.select_instance(language=language)
        if not instance:
            raise HTTPException(status_code=503, detail="No MCP instances available")

        payload = {
            "tool": "get_function_info",
            "parameters": {
                "file_path": file_path,
                "function_name": function_name,
                "include_callers": include_callers,
                "include_callees": include_callees,
                "include_source": include_source
            }
        }

        ok, mcp_response = await load_balancer.execute_request(instance, payload)
        if not ok:
            raise HTTPException(
                status_code=500,
                detail=f"Function info request failed: {mcp_response.get('error')}"
            )

        data = mcp_response.get("result", {})
        return FunctionInfoResponse(
            request_id=request_id,
            function_info=data.get("function_info", {}),
            callers=data.get("callers", []),
            callees=data.get("callees", []),
            source_code=data.get("source_code"),
            file_info=data.get("file_info", {})
        )

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=handle_error(e, {
            "request_id": request_id,
            "function_name": function_name,
            "file_path": file_path
        }))


@router.post("/call-chain", response_model=CallChainResponse)
async def find_call_chain(
    request: CallChainRequest,
    load_balancer: LoadBalancer = Depends(get_load_balancer)
) -> CallChainResponse:
    """
    Find call chain between two functions.

    Searches for the shortest path between start and end functions.
    """
    request_id = str(uuid.uuid4())

    try:
        # Route to an MCP instance for the requested language.
        instance = await load_balancer.select_instance(language=request.language)
        if not instance:
            raise HTTPException(status_code=503, detail="No MCP instances available")

        payload = {
            "tool": "get_call_chain",
            "parameters": {
                "start_function": request.start_function,
                "end_function": request.end_function,
                "file_path": request.file_path,
                "max_depth": request.max_depth,
                "bidirectional": request.bidirectional
            }
        }

        ok, reply = await load_balancer.execute_request(instance, payload)
        if not ok:
            raise HTTPException(
                status_code=500,
                detail=f"Call chain analysis failed: {reply.get('error')}"
            )

        data = reply.get("result", {})
        return CallChainResponse(
            request_id=request_id,
            start_function=request.start_function,
            end_function=request.end_function,
            path_found=data.get("path_found", False),
            call_path=data.get("call_path"),
            path_length=data.get("path_length", 0),
            alternative_paths=data.get("alternative_paths", []),
            analysis_metadata=data.get("analysis_metadata", {})
        )

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=handle_error(e, {
            "request_id": request_id,
            "start_function": request.start_function,
            "end_function": request.end_function
        }))


@router.post("/batch", response_model=BatchAnalysisResponse)
async def batch_analysis(
    request: BatchAnalysisRequest,
    background_tasks: BackgroundTasks,
    load_balancer: LoadBalancer = Depends(get_load_balancer),
    session_manager: SessionManager = Depends(get_session_manager)
) -> BatchAnalysisResponse:
    """
    Perform batch analysis on multiple files.

    Analyzes multiple files (in parallel when requested) and returns the
    combined results. Per-file failures are reported in `failed_files`
    instead of aborting the whole batch.
    """
    request_id = str(uuid.uuid4())

    try:
        log_info(f"Starting batch analysis {request_id}", {
            "file_count": len(request.file_paths),
            "parallel_processing": request.parallel_processing
        })

        # Create session for batch analysis.
        session = await session_manager.create_session(
            session_type="batch_analysis",
            configuration={
                "file_count": len(request.file_paths),
                "max_depth": request.max_depth,
                "parallel_processing": request.parallel_processing
            }
        )

        if request.parallel_processing:
            # Process files in parallel; failures come back as exception objects.
            tasks = [
                _analyze_single_file(
                    file_path,
                    request,
                    load_balancer,
                    session.session_id
                )
                for file_path in request.file_paths
            ]
            results = await asyncio.gather(*tasks, return_exceptions=True)
        else:
            # Process files sequentially. BUG FIX: mirror the parallel path's
            # gather(return_exceptions=True) semantics — previously one failing
            # file aborted the entire batch in sequential mode only.
            results = []
            for file_path in request.file_paths:
                try:
                    result = await _analyze_single_file(
                        file_path,
                        request,
                        load_balancer,
                        session.session_id
                    )
                except Exception as exc:
                    result = exc
                results.append(result)

        # Separate successful and failed results.
        successful_results = []
        failed_files = []

        for i, result in enumerate(results):
            if isinstance(result, Exception):
                failed_files.append({
                    "file_path": request.file_paths[i],
                    "error": str(result)
                })
            else:
                successful_results.append(result)

        def _node_count(analysis) -> int:
            """Best-effort node count; call_tree may be a model or a plain dict."""
            tree = analysis.call_tree
            metadata = tree.get("metadata", {}) if isinstance(tree, dict) else getattr(tree, "metadata", {})
            return metadata.get("total_nodes", 0) if isinstance(metadata, dict) else 0

        # Generate summary. total_nodes previously assumed call_tree was a
        # model with a .metadata attribute, which crashed on dict call trees.
        summary = {
            "total_files": len(request.file_paths),
            "successful_analyses": len(successful_results),
            "failed_analyses": len(failed_files),
            "total_nodes": sum(_node_count(r) for r in successful_results),
            "analysis_time": sum(r.processing_time or 0 for r in successful_results)
        }

        response = BatchAnalysisResponse(
            request_id=request_id,
            results=successful_results,
            summary=summary,
            failed_files=failed_files
        )

        # Export in the background if requested so the HTTP response isn't delayed.
        if request.export_format:
            background_tasks.add_task(
                _export_batch_results,
                response,
                request.export_format,
                session_manager,
                session.session_id
            )

        return response

    except HTTPException:
        # Consistency with the other endpoints: never wrap an intentional
        # HTTP error into a generic 500.
        raise
    except Exception as e:
        error_response = handle_error(e, {
            "request_id": request_id,
            "file_count": len(request.file_paths)
        })
        raise HTTPException(status_code=500, detail=error_response)


@router.post("/dependencies", response_model=DependencyAnalysisResponse)
async def analyze_dependencies(
    request: DependencyAnalysisRequest,
    load_balancer: LoadBalancer = Depends(get_load_balancer)
) -> DependencyAnalysisResponse:
    """
    Analyze file dependencies.

    Returns information about imports, includes, and other dependencies.
    """
    request_id = str(uuid.uuid4())

    try:
        # Pick an MCP instance capable of handling the requested language.
        instance = await load_balancer.select_instance(language=request.language)
        if not instance:
            raise HTTPException(status_code=503, detail="No MCP instances available")

        payload = {
            "tool": "analyze_dependencies",
            "parameters": {
                "file_path": request.file_path,
                "include_external": request.include_external,
                "include_internal": request.include_internal,
                "depth": request.depth
            }
        }

        ok, reply = await load_balancer.execute_request(instance, payload)
        if not ok:
            raise HTTPException(
                status_code=500,
                detail=f"Dependency analysis failed: {reply.get('error')}"
            )

        data = reply.get("result", {})
        return DependencyAnalysisResponse(
            request_id=request_id,
            file_path=request.file_path,
            dependencies=data.get("dependencies", []),
            dependency_count=data.get("dependency_count", 0),
            external_dependencies=data.get("external_dependencies", []),
            internal_dependencies=data.get("internal_dependencies", []),
            dependency_tree=data.get("dependency_tree", {})
        )

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=handle_error(e, {
            "request_id": request_id,
            "file_path": request.file_path
        }))


@router.post("/validate", response_model=ValidationResponse)
async def validate_file(
    request: ValidateFileRequest,
    load_balancer: LoadBalancer = Depends(get_load_balancer)
) -> ValidationResponse:
    """
    Validate a file for processing.

    Checks if the file can be processed and optionally validates syntax.
    """
    request_id = str(uuid.uuid4())

    try:
        # Pick an MCP instance for the (optional) language hint.
        instance = await load_balancer.select_instance(language=request.language)
        if not instance:
            raise HTTPException(status_code=503, detail="No MCP instances available")

        payload = {
            "tool": "validate_file",
            "parameters": {
                "file_path": request.file_path,
                "language": request.language,
                "check_syntax": request.check_syntax
            }
        }

        ok, reply = await load_balancer.execute_request(instance, payload)
        result = reply.get("result", {})

        # A failed call may still carry a usable validation verdict — an
        # invalid file is a normal outcome, not a transport error. Only treat
        # it as a server error when no verdict came back at all.
        if not ok and "valid" not in result:
            raise HTTPException(
                status_code=500,
                detail=f"File validation failed: {reply.get('error')}"
            )

        return ValidationResponse(
            request_id=request_id,
            valid=result.get("valid", False),
            file_path=request.file_path,
            language=result.get("language"),
            file_size=result.get("file_size", 0),
            line_count=result.get("line_count"),
            syntax_errors=result.get("syntax_errors", []),
            warnings=result.get("warnings", [])
        )

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=handle_error(e, {
            "request_id": request_id,
            "file_path": request.file_path
        }))


# Helper functions

async def _process_cross_language_calls(
    call_tree_data: Dict[str, Any],
    load_balancer: LoadBalancer,
    session_manager: SessionManager,
    session_id: str
):
    """Process cross-language calls in background."""
    try:
        for cross_call in call_tree_data.get("cross_language_calls", []):
            target_language = cross_call.get("target_language")
            if not target_language:
                continue

            instance = await load_balancer.select_instance(language=target_language)
            if not instance:
                continue

            # Build a limited-depth call tree rooted at the foreign target.
            payload = {
                "tool": "build_call_tree",
                "parameters": {
                    "entry_point": cross_call.get("target_function"),
                    "max_depth": 5,  # Limited depth for cross-language calls
                    "session_id": session_id
                }
            }

            ok, reply = await load_balancer.execute_request(instance, payload)
            if not ok:
                continue

            # Stash the foreign sub-tree in the session for later merging.
            await session_manager.update_session_data(
                session_id,
                f"cross_language_result_{cross_call.get('call_id')}",
                reply.get("result")
            )

    except Exception as e:
        log_warning(f"Cross-language processing failed: {e}")


async def _cache_analysis_results(
    request: "AnalysisRequest",
    response: "AnalysisResponse",
    session_manager: "SessionManager"
):
    """Cache analysis results for future use.

    Best-effort: failures are logged and never propagated to the caller.
    """
    import hashlib

    try:
        # BUG FIX: derive a deterministic cache key. The previous code used
        # the built-in hash(), which is randomized per process
        # (PYTHONHASHSEED), so entries cached by one worker/restart could
        # never be looked up by another.
        key_material = f"{request.file_path}::{request.function_name}"
        cache_key = f"analysis_{hashlib.sha256(key_material.encode('utf-8')).hexdigest()}"
        await session_manager.cache_data(cache_key, response.dict(), ttl_seconds=3600)
    except Exception as e:
        log_warning(f"Failed to cache analysis results: {e}")


async def _analyze_single_file(
    file_path: str,
    batch_request: BatchAnalysisRequest,
    load_balancer: LoadBalancer,
    session_id: str
) -> AnalysisResponse:
    """Analyze a single file for batch processing.

    NOTE(review): still a placeholder — it builds the per-file request but
    does not yet dispatch it through the analysis pipeline.
    """
    # Create individual analysis request (first entry point applies to every file).
    request = AnalysisRequest(
        file_path=file_path,
        function_name=batch_request.entry_points[0] if batch_request.entry_points else None,
        language=batch_request.language,
        max_depth=batch_request.max_depth,
        session_id=session_id
    )

    # TODO: route `request` through the same pipeline as analyze_code().
    # Until then, return a well-formed placeholder: batch_analysis() reads
    # request_id and processing_time from every successful result, so supply
    # them (the previous placeholder omitted both).
    return AnalysisResponse(
        request_id=str(uuid.uuid4()),
        processing_time=0.0,
        call_tree={},
        analysis_metadata={"file_path": file_path, "placeholder": True},
        session_id=session_id
    )


async def _export_batch_results(
    response: BatchAnalysisResponse,
    export_format: str,
    session_manager: SessionManager,
    session_id: str
):
    """Export batch analysis results in background."""
    try:
        # Serialize the full response for now; per-format handling is still TODO.
        payload = response.dict()
        await session_manager.update_session_data(session_id, "export_data", payload)
    except Exception as e:
        log_warning(f"Failed to export batch results: {e}")


async def _handle_node_selection(
    request: ExplorationRequest,
    exploration_state: Dict[str, Any],
    load_balancer: LoadBalancer,
    session_manager: SessionManager
) -> Dict[str, Any]:
    """Handle node selection in exploration mode."""
    # Placeholder: echo the requested selections back; real selection logic TBD.
    selections = request.parameters.get("selections", [])
    return {
        "action_result": {"selected_nodes": selections},
        "current_node": {},
        "available_actions": ["select_nodes", "navigate_back", "view_code"],
        "updated_state": exploration_state,
    }


async def _handle_navigation_back(
    request: ExplorationRequest,
    exploration_state: Dict[str, Any],
    session_manager: SessionManager
) -> Dict[str, Any]:
    """Handle navigation back in exploration mode."""
    # Placeholder: report success without mutating the exploration state.
    outcome = {"navigated": True}
    return {
        "action_result": outcome,
        "current_node": {},
        "available_actions": ["select_nodes", "navigate_back", "view_code"],
        "updated_state": exploration_state,
    }


async def _handle_get_candidates(
    request: ExplorationRequest,
    exploration_state: Dict[str, Any],
    load_balancer: LoadBalancer
) -> Dict[str, Any]:
    """Handle getting candidate nodes."""
    # Placeholder: no candidate nodes are fetched yet.
    outcome: Dict[str, Any] = {
        "action_result": {"candidates_retrieved": True},
        "current_node": {},
        "available_actions": ["select_nodes", "navigate_back", "view_code"],
        "candidate_nodes": [],
        "updated_state": exploration_state,
    }
    return outcome


async def _handle_view_code(
    request: ExplorationRequest,
    exploration_state: Dict[str, Any],
    load_balancer: LoadBalancer
) -> Dict[str, Any]:
    """Handle code viewing in exploration mode."""
    # Placeholder: acknowledge the request; code retrieval is still TODO.
    return {
        "action_result": {"code_viewed": True},
        "current_node": {},
        "available_actions": ["select_nodes", "navigate_back", "view_code"],
        "updated_state": exploration_state,
    }