"""
Telemetry API Routes
Query telemetry data from extractor database with team assignments
"""

from typing import List, Optional
from datetime import datetime
from fastapi import APIRouter, Depends, HTTPException, status, Query
from pydantic import BaseModel, Field
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, func, and_, or_, text
import fnmatch
import json
import uuid

from app.core.database import get_business_db, get_extractor_db
from app.core.security import get_current_user
from app.models.business import Team, TeamFileAssignment, TelemetryAnalysis, MetricTeamAssignment
from app.models.extractor import Metric, MetricOwner, UkmMetric

router = APIRouter()


# Pydantic models
class TelemetryPointResponse(BaseModel):
    """Telemetry point response model.

    Combines a metric row from the extractor DB with team assignment and
    analysis status sourced from the business DB.
    """
    # Core metric fields (extractor DB `Metric` / `MetricOwner`).
    id: int
    name: str
    type: str
    description: Optional[str]
    units: Optional[str]
    component: Optional[str]
    file_path: str
    line_number: Optional[int]
    owner: Optional[str]
    # Team assignment (business DB); None when no team is assigned.
    # Direct metric assignments carry only a team name, so team_id may be
    # None even when team_name is set.
    assigned_team_id: Optional[str] = None
    assigned_team_name: Optional[str] = None
    # Analysis workflow state; metrics without an analysis record default
    # to "NOT_ANALYZED".
    analysis_status: str = "NOT_ANALYZED"
    analysis_notes: Optional[str] = None
    analyzed_by: Optional[str] = None
    analyzed_at: Optional[str] = None  # ISO-8601 string, not datetime
    # Extra assignment details (pattern / reason / timestamp).
    metadata: Optional[dict] = {}

    class Config:
        # Allow construction from ORM-style attribute objects.
        from_attributes = True


class TelemetryListResponse(BaseModel):
    """Paginated telemetry list response envelope."""
    success: bool
    data: List[TelemetryPointResponse]
    # `total` is the filtered count across all pages, not len(data).
    total: int
    page: int
    page_size: int
    message: Optional[str] = None

class TelemetryStatsResponse(BaseModel):
    """Telemetry statistics response envelope.

    `data` is a free-form dict (totals, per-type/per-component breakdowns,
    analysis-status buckets) built by the stats endpoint.
    """
    success: bool
    data: dict
    message: Optional[str] = None

def match_file_pattern(file_path: str, pattern: str) -> bool:
    """Return True if *file_path* matches the glob-style *pattern*.

    Matching is delegated to :func:`fnmatch.fnmatch`, so shell wildcards
    (``*``, ``?``, ``[seq]``) apply; note that ``*`` also crosses ``/``
    separators, and case sensitivity follows the host OS conventions.
    """
    is_match = fnmatch.fnmatch(file_path, pattern)
    return is_match


async def get_analysis_status_for_metric(
    metric_id: int,
    db: AsyncSession
) -> dict:
    """Get the analysis status for a metric from the business DB.

    Args:
        metric_id: Extractor-DB metric primary key.
        db: Business DB session (TelemetryAnalysis lives there).

    Returns:
        A dict with keys ``analysis_status``, ``analysis_notes``,
        ``analyzed_by`` and ``analyzed_at`` (ISO-8601 string or None).
        Metrics without an analysis record get a default
        ``"NOT_ANALYZED"`` payload — this function never returns None,
        so the return annotation is ``dict`` (was ``Optional[dict]``).
    """
    query = select(
        TelemetryAnalysis.analysis_status,
        TelemetryAnalysis.analysis_notes,
        TelemetryAnalysis.analyzed_by,
        TelemetryAnalysis.analyzed_at
    ).where(
        TelemetryAnalysis.metric_id == metric_id
    )

    result = await db.execute(query)
    analysis = result.first()

    if analysis:
        return {
            "analysis_status": analysis.analysis_status,
            "analysis_notes": analysis.analysis_notes,
            "analyzed_by": analysis.analyzed_by,
            # Serialize to ISO string so the value is JSON-ready.
            "analyzed_at": analysis.analyzed_at.isoformat() if analysis.analyzed_at else None
        }

    # No analysis record yet: report the default workflow state.
    return {
        "analysis_status": "NOT_ANALYZED",
        "analysis_notes": None,
        "analyzed_by": None,
        "analyzed_at": None
    }


async def get_team_assignment_for_file(
    file_path: str,
    db: AsyncSession
) -> Optional[dict]:
    """Resolve a team assignment for *file_path* via file patterns.

    Loads every active, non-empty file-pattern assignment and returns the
    first one whose glob pattern matches the path, or None when nothing
    matches.
    """
    stmt = select(
        TeamFileAssignment.team_id,
        Team.name.label('team_name'),
        TeamFileAssignment.file_pattern
    ).join(
        Team, TeamFileAssignment.team_id == Team.id
    ).where(
        and_(
            TeamFileAssignment.is_active == True,
            TeamFileAssignment.file_pattern != ''
        )
    )

    rows = (await db.execute(stmt)).fetchall()

    # First pattern wins; ordering is whatever the DB returns.
    for row in rows:
        if not match_file_pattern(file_path, row.file_pattern):
            continue
        return {
            "team_id": row.team_id,
            "team_name": row.team_name,
            "pattern": row.file_pattern
        }

    return None


async def get_team_assignment_for_metric(
    metric_id: int,
    db: AsyncSession
) -> Optional[dict]:
    """Return the direct team assignment for a metric, or None.

    Direct assignments identify teams by name only, so ``team_id`` in the
    returned dict is always None.
    """
    stmt = select(
        MetricTeamAssignment.team_name,
        MetricTeamAssignment.assignment_reason,
        MetricTeamAssignment.assigned_at
    ).where(
        MetricTeamAssignment.metric_id == metric_id
    )

    row = (await db.execute(stmt)).first()
    if row is None:
        return None

    assigned_at = row.assigned_at.isoformat() if row.assigned_at else None
    return {
        "team_id": None,  # Direct assignments use team_name as identifier
        "team_name": row.team_name,
        "assignment_reason": row.assignment_reason,
        "assigned_at": assigned_at
    }


@router.get("/", response_model=TelemetryListResponse)
async def get_telemetry_points(
    page: int = Query(1, ge=1),
    page_size: int = Query(50, ge=1, le=1000),
    type_filter: Optional[str] = Query(None),
    component_filter: Optional[str] = Query(None),
    team_filter: Optional[str] = Query(None),
    analysis_status_filter: Optional[str] = Query(None),
    search: Optional[str] = Query(None),
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Get telemetry points with optional filters.

    Metrics are queried and paginated in the extractor DB; each returned
    row is then enriched with team assignment (direct assignment first,
    file-pattern fallback) and analysis status from the business DB.

    Bug fixed: the response `metadata` dict previously called
    ``team_assignment.get(...)`` without a None guard, so any metric
    without a team assignment raised AttributeError and the request 500'd.
    """
    try:
        # Resolve team_filter — which may be a team UUID or a plain team
        # name — to the team name used by MetricTeamAssignment.
        team_name_filter = None
        if team_filter:
            if len(team_filter) > 20 and '-' in team_filter:  # Likely a UUID
                try:
                    team_query = select(Team.name).where(Team.id == team_filter)
                    team_result = await business_db.execute(team_query)
                    team_row = team_result.first()
                    team_name_filter = team_row.name if team_row else None
                except Exception:
                    # Lookup failure: behave as if no team matched.
                    team_name_filter = None
            else:
                team_name_filter = team_filter

        # Base query: metric fields plus optional owner email (LEFT JOIN).
        query = select(
            Metric.id,
            Metric.name,
            Metric.type,
            Metric.summary,
            Metric.units,
            Metric.component,
            Metric.file_path,
            Metric.line_number,
            MetricOwner.owner_email.label('owner_name')
        ).outerjoin(
            MetricOwner, Metric.id == MetricOwner.metric_id
        )

        filters = []
        if type_filter:
            filters.append(Metric.type == type_filter)
        if component_filter:
            filters.append(Metric.component == component_filter)
        if search:
            search_pattern = f"%{search}%"
            filters.append(
                or_(
                    Metric.name.ilike(search_pattern),
                    Metric.summary.ilike(search_pattern),
                    Metric.component.ilike(search_pattern)
                )
            )

        # Team filtering happens at the SQL level via an IN-subquery over
        # direct metric->team assignments.
        if team_name_filter:
            team_metrics_query = select(
                MetricTeamAssignment.metric_id
            ).where(
                MetricTeamAssignment.team_name == team_name_filter
            )
            filters.append(Metric.id.in_(team_metrics_query))

        if filters:
            query = query.where(and_(*filters))

        # Count DISTINCT metric IDs so the owner LEFT JOIN cannot inflate
        # the total when a metric has multiple owner rows.
        count_query = select(func.count(func.distinct(Metric.id))).select_from(Metric).outerjoin(
            MetricOwner, Metric.id == MetricOwner.metric_id
        )
        if filters:
            count_query = count_query.where(and_(*filters))

        total_result = await extractor_db.execute(count_query)
        total = total_result.scalar() or 0

        # Paginate before enrichment so the per-row business-DB lookups
        # stay bounded by page_size.
        offset = (page - 1) * page_size
        query = query.offset(offset).limit(page_size)

        result = await extractor_db.execute(query)
        metrics = result.fetchall()

        telemetry_points = []
        for metric in metrics:
            # Direct metric assignment wins; fall back to file pattern.
            team_assignment = await get_team_assignment_for_metric(
                metric.id, business_db
            )
            if not team_assignment:
                team_assignment = await get_team_assignment_for_file(
                    metric.file_path, business_db
                )

            analysis_status = await get_analysis_status_for_metric(
                metric.id, business_db
            )

            # NOTE(review): this filter runs after pagination, so `total`
            # and page sizes do not reflect it — confirm this is intended.
            if analysis_status_filter:
                if analysis_status["analysis_status"] != analysis_status_filter:
                    continue

            # Guard: team_assignment may legitimately be None.
            assignment_info = team_assignment or {}

            telemetry_point = TelemetryPointResponse(
                id=metric.id,
                name=metric.name,
                type=metric.type,
                description=metric.summary,
                units=metric.units,
                component=metric.component,
                file_path=metric.file_path,
                line_number=metric.line_number,
                owner=metric.owner_name,
                assigned_team_id=assignment_info.get("team_id"),
                assigned_team_name=assignment_info.get("team_name"),
                analysis_status=analysis_status["analysis_status"],
                analysis_notes=analysis_status["analysis_notes"],
                analyzed_by=analysis_status["analyzed_by"],
                analyzed_at=analysis_status["analyzed_at"],
                metadata={
                    "assignment_pattern": assignment_info.get("pattern"),
                    "assignment_reason": assignment_info.get("assignment_reason"),
                    "assigned_at": assignment_info.get("assigned_at")
                }
            )
            telemetry_points.append(telemetry_point)

        return TelemetryListResponse(
            success=True,
            data=telemetry_points,
            total=total,
            page=page,
            page_size=page_size
        )

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve telemetry points: {str(e)}"
        )


@router.get("/{telemetry_id}", response_model=TelemetryPointResponse)
async def get_telemetry_point(
    telemetry_id: int,
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Get specific telemetry point details.

    Looks up the metric in the extractor DB, then enriches it with the
    team assignment (direct first, file-pattern fallback) and analysis
    status from the business DB.

    Bug fixed: building `metadata` previously called
    ``team_assignment.get(...)`` without a None guard, so a metric with
    no team assignment raised AttributeError and the request 500'd.

    Raises:
        HTTPException 404: metric id does not exist.
        HTTPException 500: any unexpected failure.
    """
    try:
        # Metric core fields plus optional owner email (LEFT JOIN).
        query = select(
            Metric.id,
            Metric.name,
            Metric.type,
            Metric.summary,
            Metric.units,
            Metric.component,
            Metric.file_path,
            Metric.line_number,
            MetricOwner.owner_email.label('owner_name')
        ).outerjoin(
            MetricOwner, Metric.id == MetricOwner.metric_id
        ).where(Metric.id == telemetry_id)

        result = await extractor_db.execute(query)
        metric = result.first()

        if not metric:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Telemetry point not found"
            )

        # Direct metric->team assignment takes precedence over patterns.
        team_assignment = await get_team_assignment_for_metric(
            metric.id, business_db
        )
        if not team_assignment:
            team_assignment = await get_team_assignment_for_file(
                metric.file_path, business_db
            )

        analysis_status = await get_analysis_status_for_metric(
            metric.id, business_db
        )

        # Guard: team_assignment may legitimately be None.
        assignment_info = team_assignment or {}

        return TelemetryPointResponse(
            id=metric.id,
            name=metric.name,
            type=metric.type,
            description=metric.summary,
            units=metric.units,
            component=metric.component,
            file_path=metric.file_path,
            line_number=metric.line_number,
            owner=metric.owner_name,
            assigned_team_id=assignment_info.get("team_id"),
            assigned_team_name=assignment_info.get("team_name"),
            analysis_status=analysis_status["analysis_status"],
            analysis_notes=analysis_status["analysis_notes"],
            analyzed_by=analysis_status["analyzed_by"],
            analyzed_at=analysis_status["analyzed_at"],
            metadata={
                "assignment_pattern": assignment_info.get("pattern"),
                "assignment_reason": assignment_info.get("assignment_reason"),
                "assigned_at": assignment_info.get("assigned_at")
            }
        )

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve telemetry point: {str(e)}"
        )


@router.get("/stats/overview", response_model=TelemetryStatsResponse)
async def get_telemetry_stats(
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Get telemetry overview statistics.

    Counts come from the extractor DB (totals, per-type, top components)
    and from the business DB (analysis-status breakdown).

    Bug fixed: the NOT_ANALYZED bucket previously used
    ``analysis_stats.get('NOT_ANALYZED', total_count)``, which only
    defaulted when the key was absent — metrics with no analysis record
    (reported as NOT_ANALYZED by the list endpoint) were dropped from the
    count whenever any explicit NOT_ANALYZED row existed, and the stat
    wrongly reported the full total when other statuses existed without
    any NOT_ANALYZED rows. The bucket is now derived from the total.
    """
    try:
        # Overall metric count (same base data as the list endpoint).
        total_query = select(func.count(Metric.id))
        total_result = await extractor_db.execute(total_query)
        total_count = total_result.scalar() or 0

        # Per-type breakdown.
        type_stats_query = select(
            Metric.type,
            func.count(Metric.id).label('count')
        ).group_by(Metric.type)
        type_result = await extractor_db.execute(type_stats_query)
        type_stats = {row.type: row.count for row in type_result}

        # Top components (NULL components excluded; capped at 10).
        component_stats_query = select(
            Metric.component,
            func.count(Metric.id).label('count')
        ).where(Metric.component.isnot(None)).group_by(Metric.component).limit(10)
        component_result = await extractor_db.execute(component_stats_query)
        component_stats = {row.component: row.count for row in component_result}

        # Explicit analysis records, grouped by status.
        analysis_stats_query = select(
            TelemetryAnalysis.analysis_status,
            func.count(TelemetryAnalysis.metric_id).label('count')
        ).group_by(TelemetryAnalysis.analysis_status)
        analysis_result = await business_db.execute(analysis_stats_query)
        analysis_stats = {row.analysis_status: row.count for row in analysis_result}

        analyzed_count = analysis_stats.get('ANALYZED', 0)
        needs_collection_count = analysis_stats.get('NEEDS_COLLECTION', 0)
        not_needed_count = analysis_stats.get('NOT_NEEDED', 0)
        # Metrics without an analysis record default to NOT_ANALYZED (see
        # get_analysis_status_for_metric), so derive the bucket from the
        # total instead of trusting explicit NOT_ANALYZED rows alone.
        # max(0, ...) guards against orphaned analysis records.
        not_analyzed_count = max(
            0,
            total_count - analyzed_count - needs_collection_count - not_needed_count
        )

        stats = {
            "total_metrics": total_count,
            "by_type": type_stats,
            "top_components": component_stats,
            "by_analysis_status": {
                "analyzed": analyzed_count,
                "not_analyzed": not_analyzed_count,
                "needs_collection": needs_collection_count,
                "not_needed": not_needed_count
            },
            "unassigned": total_count  # All metrics are currently unassigned since we don't use team filtering in stats
        }

        return TelemetryStatsResponse(
            success=True,
            data=stats,
            message="Telemetry statistics retrieved successfully"
        )

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve telemetry statistics: {str(e)}"
        )


# Pydantic models for analysis management
class AnalysisUpdateRequest(BaseModel):
    """Request model for updating analysis status"""
    analysis_status: str = Field(..., description="Analysis status: ANALYZED, NOT_ANALYZED, NOT_NEEDED, NEEDS_COLLECTION")
    analysis_notes: Optional[str] = None
    team_id: Optional[str] = None


class AnalysisUpdateResponse(BaseModel):
    """Response model for analysis update (single and batch endpoints)."""
    success: bool
    message: str


@router.put("/{telemetry_id}/analysis", response_model=AnalysisUpdateResponse)
async def update_telemetry_analysis(
    telemetry_id: int,
    request: AnalysisUpdateRequest,
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Update analysis status for a telemetry point (upsert).

    Verifies the metric exists in the extractor DB, then updates the
    existing TelemetryAnalysis row or creates a new one in the business DB.

    Bug fixed: the existing-record lookup used ``result.first()``, which
    returns an immutable Row rather than the ORM entity, so every update
    of an existing record raised on attribute assignment and 500'd. It
    now uses ``result.scalars().first()`` to get the mapped instance.

    Raises:
        HTTPException 404: metric id does not exist.
        HTTPException 500: any unexpected failure (transaction rolled back).
    """
    try:
        # Verify telemetry point exists in the extractor DB.
        metric_query = select(Metric.id).where(Metric.id == telemetry_id)
        metric_result = await extractor_db.execute(metric_query)
        if not metric_result.first():
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Telemetry point not found"
            )

        # Fetch the ORM entity (not a Row) so it can be mutated in place.
        existing_query = select(TelemetryAnalysis).where(
            TelemetryAnalysis.metric_id == telemetry_id
        )
        existing_result = await business_db.execute(existing_query)
        existing_analysis = existing_result.scalars().first()

        if existing_analysis:
            # Update existing record in place.
            existing_analysis.analysis_status = request.analysis_status
            existing_analysis.analysis_notes = request.analysis_notes
            existing_analysis.analyzed_by = current_user["email"]
            existing_analysis.analyzed_at = datetime.utcnow()
            existing_analysis.updated_at = datetime.utcnow()

            # Only overwrite the team when one was supplied.
            if request.team_id:
                existing_analysis.team_id = request.team_id
        else:
            # Create new analysis record.
            new_analysis = TelemetryAnalysis(
                id=str(uuid.uuid4()),
                metric_id=telemetry_id,
                analysis_status=request.analysis_status,
                analysis_notes=request.analysis_notes,
                analyzed_by=current_user["email"],
                analyzed_at=datetime.utcnow(),
                team_id=request.team_id
            )
            business_db.add(new_analysis)

        await business_db.commit()

        return AnalysisUpdateResponse(
            success=True,
            message=f"Analysis status updated to {request.analysis_status}"
        )

    except HTTPException:
        raise
    except Exception as e:
        await business_db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to update analysis status: {str(e)}"
        )


@router.put("/batch-analysis", response_model=AnalysisUpdateResponse)
async def batch_update_analysis(
    telemetry_ids: List[int],
    request: AnalysisUpdateRequest,
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Update analysis status for multiple telemetry points (upsert each).

    Bugs fixed:
    - The existing-record lookup used ``result.first()`` (an immutable
      Row, not the ORM entity), so updating any existing record raised on
      attribute assignment and 500'd; now uses ``.scalars().first()``.
    - Existence was checked with ``len(existing_ids) != len(telemetry_ids)``,
      so duplicate IDs in the request spuriously produced a 404; the check
      is now set-based and duplicate IDs are processed once.

    Raises:
        HTTPException 404: one or more metric ids do not exist.
        HTTPException 500: any unexpected failure (transaction rolled back).
    """
    try:
        # Verify all requested telemetry points exist (set-based, so
        # duplicates in the request do not trigger a false 404).
        metric_query = select(Metric.id).where(Metric.id.in_(telemetry_ids))
        metric_result = await extractor_db.execute(metric_query)
        existing_ids = {row.id for row in metric_result.fetchall()}

        if set(telemetry_ids) - existing_ids:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="One or more telemetry points not found"
            )

        updated_count = 0

        # Deduplicate while preserving request order; each id is
        # upserted exactly once.
        for telemetry_id in dict.fromkeys(telemetry_ids):
            # Fetch the ORM entity (not a Row) so it can be mutated.
            existing_query = select(TelemetryAnalysis).where(
                TelemetryAnalysis.metric_id == telemetry_id
            )
            existing_result = await business_db.execute(existing_query)
            existing_analysis = existing_result.scalars().first()

            if existing_analysis:
                # Update existing record in place.
                existing_analysis.analysis_status = request.analysis_status
                existing_analysis.analysis_notes = request.analysis_notes
                existing_analysis.analyzed_by = current_user["email"]
                existing_analysis.analyzed_at = datetime.utcnow()
                existing_analysis.updated_at = datetime.utcnow()

                if request.team_id:
                    existing_analysis.team_id = request.team_id
            else:
                # Create new analysis record.
                new_analysis = TelemetryAnalysis(
                    id=str(uuid.uuid4()),
                    metric_id=telemetry_id,
                    analysis_status=request.analysis_status,
                    analysis_notes=request.analysis_notes,
                    analyzed_by=current_user["email"],
                    analyzed_at=datetime.utcnow(),
                    team_id=request.team_id
                )
                business_db.add(new_analysis)

            updated_count += 1

        await business_db.commit()

        return AnalysisUpdateResponse(
            success=True,
            message=f"Updated analysis status for {updated_count} telemetry points to {request.analysis_status}"
        )

    except HTTPException:
        raise
    except Exception as e:
        await business_db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to update batch analysis status: {str(e)}"
        )