"""
Directory Tree API Routes
Provides hierarchical directory structure for telemetry metrics
"""

from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy.ext.asyncio import AsyncSession
from typing import Optional, List
from pydantic import BaseModel
import json
import uuid
import os
import sqlite3
from datetime import datetime

from app.services.directory_tree_service import directory_tree_service
from app.core.database import get_extractor_db, get_business_db
from app.models.extractor import Metric
from app.models.business import MetricTeamAssignment, Directory
from sqlalchemy import select, func, and_
import logging

logger = logging.getLogger(__name__)

router = APIRouter()

# Pydantic models for batch operations
class BatchUpdateMetricsTeamRequest(BaseModel):
    """Request body for POST /batch-update-metrics-team."""
    directory_path: str  # exact path of the directory whose metrics are updated
    team_name: str  # target team name; "unassigned" clears existing assignments
    analysis_notes: Optional[str] = None  # optional notes stored with each assignment/analysis record

class BatchAnalyzeMetricsRequest(BaseModel):
    """Request body for POST /batch-analyze-metrics."""
    directory_path: str  # exact path of the directory whose metrics are analyzed
    should_collect: bool  # True -> status NEEDS_COLLECTION, False -> NOT_NEEDED
    analysis_reason: str  # rationale recorded inside the analysis notes JSON


@router.post("/batch-update-metrics-team")
async def batch_update_metrics_team(
    request: BatchUpdateMetricsTeamRequest,
    extractor_db: AsyncSession = Depends(get_extractor_db),
    business_db: AsyncSession = Depends(get_business_db),
):
    """
    Batch update team assignment for all metrics in a directory.

    Resolves the directory by exact path, then for every metric in it
    replaces its row in metric_team_assignments (or deletes the row when the
    target team is "unassigned") and writes a telemetry_analysis record
    carrying the team id and analysis notes.

    Args:
        request: Batch update request with directory_path, team_name, and analysis_notes
        extractor_db: Extractor database session (unused; writes go through a
            direct sqlite3 connection so the whole batch shares one transaction)
        business_db: Business database session (unused, same reason)

    Returns:
        Batch update operation result: success flag, message, updated_count

    Raises:
        HTTPException: 404 if the team or directory does not exist,
            500 on any unexpected failure.
    """
    try:
        logger.info("🔄 [DirectoryAPI] Batch metrics team update request received")
        logger.info(f"   Directory: {request.directory_path}")
        logger.info(f"   Target team: {request.team_name}")
        logger.info(f"   Analysis notes: {request.analysis_notes}")

        # sqlite3 / os / uuid come from the module-level imports; the
        # previous function-local re-imports were redundant shadows.

        # Resolve the SQLite file: prefer <backend>/data/uma_insight.db,
        # otherwise fall back to <project-root>/backend/data/uma_insight.db.
        current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))  # backend
        backend_data_dir = os.path.join(current_dir, 'data')
        db_path = os.path.join(backend_data_dir, 'uma_insight.db')
        if not os.path.exists(db_path):
            project_root = os.path.dirname(current_dir)  # project root
            db_path = os.path.join(project_root, 'backend', 'data', 'uma_insight.db')

        # Direct SQLite connection: all per-metric writes share one
        # transaction; uncommitted work is discarded when the connection closes.
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        try:
            # Resolve the team id unless the caller asked for 'unassigned'.
            team_id = None
            if request.team_name.lower() != "unassigned":
                cursor.execute("SELECT id FROM teams WHERE name = ?", (request.team_name,))
                team_result = cursor.fetchone()
                if not team_result:
                    logger.error(f"❌ [DirectoryAPI] Team not found: {request.team_name}")
                    raise HTTPException(status_code=404, detail=f"Team '{request.team_name}' not found")
                team_id = team_result[0]

            # Resolve directory_id from the path for precise filtering.
            cursor.execute(
                "SELECT id FROM directories WHERE path = ?",
                (request.directory_path,)
            )
            directory_result = cursor.fetchone()
            if not directory_result:
                logger.error(f"❌ [DirectoryAPI] Directory not found: {request.directory_path}")
                raise HTTPException(status_code=404, detail=f"Directory '{request.directory_path}' not found")
            directory_id = directory_result[0]

            # All metrics that live directly in this directory.
            cursor.execute(
                "SELECT id FROM metrics WHERE directory_id = ?",
                (directory_id,)
            )
            metric_ids = [row[0] for row in cursor.fetchall()]
            if not metric_ids:
                logger.warning(f"⚠️ [DirectoryAPI] No metrics found in directory: {request.directory_path}")
                return {
                    "success": True,
                    "message": f"No metrics found in directory {request.directory_path}",
                    "updated_count": 0
                }

            logger.info(f"📊 [DirectoryAPI] Found {len(metric_ids)} metrics to update")

            # One timestamp / reason for the whole batch keeps the rows consistent.
            current_timestamp = datetime.now().isoformat()
            assignment_reason = request.analysis_notes or "batch directory assignment"

            for metric_id in metric_ids:
                # 1. Replace (or remove) the team assignment for this metric.
                if team_id:
                    # Delete-then-insert avoids duplicate assignment rows.
                    cursor.execute(
                        "DELETE FROM metric_team_assignments WHERE metric_id = ?",
                        (metric_id,)
                    )
                    cursor.execute(
                        """INSERT INTO metric_team_assignments
                           (id, metric_id, team_name, assignment_reason, assigned_at)
                           VALUES (?, ?, ?, ?, CURRENT_TIMESTAMP)""",
                        (str(uuid.uuid4()), metric_id, request.team_name, assignment_reason)
                    )
                else:
                    # 'unassigned' simply clears any existing assignment.
                    cursor.execute(
                        "DELETE FROM metric_team_assignments WHERE metric_id = ?",
                        (metric_id,)
                    )

                # 2. The metrics table itself is untouched: team membership
                #    lives solely in metric_team_assignments.

                # 3. Record the assignment in telemetry_analysis. Status is
                #    "NOT_NEEDED" because this endpoint must not change the
                #    analysis verdict, only attach team/notes information.
                # NOTE(review): the id is a fresh uuid each call, so
                # INSERT OR REPLACE only deduplicates if telemetry_analysis
                # has a UNIQUE constraint on metric_id — confirm the schema,
                # otherwise repeated calls accumulate rows per metric.
                analysis_id = str(uuid.uuid4())
                cursor.execute(
                    """INSERT OR REPLACE INTO telemetry_analysis
                       (id, metric_id, analysis_status, assigned_team_id, analysis_notes, analyzed_by, analyzed_at)
                       VALUES (?, ?, ?, ?, ?, ?, ?)""",
                    (analysis_id, metric_id, "NOT_NEEDED", team_id, request.analysis_notes, "system_batch_update", current_timestamp)
                )
            conn.commit()
            logger.info("✅ [DirectoryAPI] Batch metrics team update completed successfully")
            logger.info(f"   Updated {len(metric_ids)} metrics with team assignment and analysis records")
            return {
                "success": True,
                "message": f"Successfully updated team assignment and created analysis records for {len(metric_ids)} metrics in directory {request.directory_path}",
                "updated_count": len(metric_ids)
            }
        finally:
            conn.close()
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"❌ [DirectoryAPI] Failed to batch update metrics team assignments: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to batch update metrics team assignments: {str(e)}"
        )


@router.get("/directory-list")
async def get_directory_list(
    extractor_db: AsyncSession = Depends(get_extractor_db),
    business_db: AsyncSession = Depends(get_business_db),
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=200, description="Items per page"),
    search: Optional[str] = Query(None, description="Search in directory path"),
    sort_by: Optional[str] = Query("metric_count", description="Sort field"),
    sort_order: Optional[str] = Query("desc", description="Sort order: asc or desc"),
    min_metrics: Optional[int] = Query(None, description="Minimum metrics count filter"),
    team_filter: Optional[str] = Query(None, description="Filter by team name or 'unassigned'")
):
    """
    Get list of directories containing telemetry metrics with counts and statistics.

    Optimized for performance by excluding team assignment information.

    Args:
        extractor_db: Extractor database session
        business_db: Business database session
        page: Page number
        page_size: Items per page
        search: Case-insensitive substring match on directory path or name
        sort_by: Sort field (metric_count, path, name, analyzed_count, analysis_progress)
        sort_order: Sort order (asc, desc)
        min_metrics: Filter directories with at least this many metrics
        team_filter: Filter by team name or 'unassigned' (Note: returns empty
            results when set, since team data is not loaded for performance)

    Returns:
        Paginated list of directories with telemetry metrics statistics
        (team assignment data excluded for performance)

    Raises:
        HTTPException: 500 when the directory statistics service fails.
    """
    try:
        logger.info("📁 [DirectoryAPI] Directory list request received")
        logger.info(f"   Page: {page}, Page size: {page_size}")
        logger.info(f"   Search: {search}, Sort: {sort_by} {sort_order}")
        logger.info(f"   Team Filter: {team_filter}")

        # Use the optimized directory service for real-time counts
        from app.services.directory_tree_service_optimized import directory_tree_service_optimized

        # Get directory statistics with real-time counts
        result = await directory_tree_service_optimized.get_directory_statistics(
            extractor_db=extractor_db,
            business_db=business_db,
            directory_path=None  # Get all directories
        )

        if not result.get("success"):
            raise HTTPException(
                status_code=500,
                detail="Failed to retrieve directory statistics"
            )

        # Flatten the directory tree into a single list.
        all_directories = []
        def extract_directories_from_tree(nodes):
            for node in nodes:
                all_directories.append(node)
                if 'children' in node and node['children']:
                    extract_directories_from_tree(node['children'])

        extract_directories_from_tree(result['data'])

        # Apply filters
        filtered_directories = []
        for directory in all_directories:
            metric_count = directory.get('metric_count', 0)
            # Skip directories with no metrics
            if metric_count == 0:
                continue
            # Apply minimum metrics filter ('is not None' so an explicit 0 is
            # still treated as a provided — if vacuous — threshold).
            if min_metrics is not None and metric_count < min_metrics:
                continue
            # Apply search filter (substring match on path or name)
            if search:
                search_lower = search.lower()
                directory_path = directory.get('path', '').lower()
                directory_name = directory.get('name', '').lower()
                if search_lower not in directory_path and search_lower not in directory_name:
                    continue
            # Team data is not loaded in this endpoint for performance, so any
            # concrete team filter yields an empty result set by design.
            if team_filter and team_filter.lower() != "all":
                continue

            # Transform to expected format (with optimized team count)
            transformed_directory = {
                "id": directory['id'],  # Add directory_id for frontend use
                "directory_path": directory['path'],
                "file_path": f"{directory['path']}/histograms.xml",  # Standard file path
                "metric_count": metric_count,
                "analyzed_count": directory.get('analyzed_count', 0),
                "not_analyzed_count": directory.get('not_analyzed_count', 0),
                "analysis_progress": directory.get('analysis_progress', {}).get('overall_progress', 0),
                "file_name": f"{directory['name']}/histograms.xml",
                "team_id": None,  # Not available in optimized service
                "team_name": None,  # Not available for performance optimization
                "team_count": directory.get('team_count', 0),  # Use optimized team count
                "assignment_confidence": 0,  # Not available in optimized service
                "assignment_method": None,
                "assigned_at": None
                # assigned_teams field removed for performance optimization
            }
            filtered_directories.append(transformed_directory)

        # Apply sorting (team_name removed for performance optimization)
        valid_sort_fields = ['metric_count', 'path', 'name', 'analyzed_count', 'analysis_progress']
        if sort_by not in valid_sort_fields:
            sort_by = 'metric_count'

        # Map frontend sort fields to data fields
        sort_mapping = {
            'path': 'directory_path',
            'name': 'directory_path',  # Use path for name sorting since name is extracted
            'analyzed_count': 'analyzed_count',
            'analysis_progress': 'analysis_progress'
        }
        sort_field = sort_mapping.get(sort_by, sort_by)
        reverse_order = sort_order.lower() == 'desc'

        # BUG FIX: sort_mapping translates 'name'/'path' to 'directory_path',
        # so the old check `sort_field in ['name', 'path']` never matched and
        # missing string values defaulted to 0, mixing int/str in sort() and
        # raising TypeError. Key the string default on the mapped field name.
        string_sort_fields = {'directory_path'}

        def get_sort_key(x):
            value = x.get(sort_field)
            if value is None or value == "":
                # Empty string for string fields, zero for numeric fields,
                # so the comparison type stays homogeneous.
                return "" if sort_field in string_sort_fields else 0
            return value

        filtered_directories.sort(key=get_sort_key, reverse=reverse_order)

        # Get total count after filtering
        total = len(filtered_directories)

        # Apply pagination
        offset = (page - 1) * page_size
        paginated_directories = filtered_directories[offset:offset + page_size]

        logger.info("✅ [DirectoryAPI] Directory list retrieved successfully using optimized service")
        logger.info(f"   Found {total} total directories, returning {len(paginated_directories)}")

        return {
            "success": True,
            "data": paginated_directories,
            "total": total,
            "page": page,
            "page_size": page_size,
            "filters": {
                "search": search,
                "team_filter": team_filter,
                "min_metrics": min_metrics
            },
            "sort": {
                "sort_by": sort_by,
                "sort_order": sort_order
            }
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"❌ [DirectoryAPI] Failed to get directory list: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to get directory list: {str(e)}"
        )


@router.get("/directory-metrics")
async def get_directory_metrics(
    extractor_db: AsyncSession = Depends(get_extractor_db),
    business_db: AsyncSession = Depends(get_business_db),
    directory_path: str = Query(..., description="Directory path to get metrics from"),
    page: int = Query(1, ge=1, description="Page number"),
    page_size: int = Query(50, ge=1, le=200, description="Items per page"),
    search: Optional[str] = Query(None, description="Search in metric name"),
    type: Optional[str] = Query(None, description="Filter by metric type"),
    component: Optional[str] = Query(None, description="Filter by component"),
    analysis_status: Optional[str] = Query(None, description="Filter by analysis status"),
    sort_by: str = Query("name", description="Sort field"),
    sort_order: str = Query("asc", description="Sort order: asc or desc")
):
    """
    Get list of telemetry metrics within a specific directory.

    Args:
        extractor_db: Extractor database session
        business_db: Business database session (unused; metrics live in the extractor DB)
        directory_path: Directory path to get metrics from
        page: Page number
        page_size: Items per page
        search: Case-insensitive substring search in metric name
        type: Filter by metric type (histogram, enum, ukm_event, ukm_metric)
        component: Filter by component (case-insensitive substring match)
        analysis_status: Filter by analysis status (ANALYZED, PENDING, FAILED)
        sort_by: Sort field (name, type, component, created_at)
        sort_order: Sort order (asc, desc)

    Returns:
        Paginated list of telemetry metrics within the directory

    Raises:
        HTTPException: 400 on invalid filter values, 404 when the directory
            is unknown, 500 on unexpected failures.
    """
    try:
        logger.info("📁 [DirectoryAPI] Directory metrics request received")
        logger.info(f"   Directory: {directory_path}")
        logger.info(f"   Page: {page}, Page size: {page_size}")
        logger.info(f"   Filters: search={search}, type={type}, component={component}, status={analysis_status}")

        # Validate enum-like query parameters up front so bad input fails fast.
        valid_types = ["histogram", "enum", "ukm_event", "ukm_metric"]
        if type and type not in valid_types:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid type filter. Must be one of: {valid_types}"
            )

        valid_statuses = ["ANALYZED", "PENDING", "FAILED"]
        if analysis_status and analysis_status not in valid_statuses:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid analysis_status filter. Must be one of: {valid_statuses}"
            )

        # Local imports on purpose: this endpoint needs the *extractor*
        # schema's Directory model, which would otherwise be shadowed by the
        # business-schema Directory imported at module level.
        from sqlalchemy import select, func, and_, or_
        from app.models.extractor import Metric, Directory

        # Resolve directory_id from the path for precise filtering.
        directory_query = select(Directory.id).where(Directory.path == directory_path)
        directory_result = await extractor_db.execute(directory_query)
        directory_id = directory_result.scalar_one_or_none()

        if not directory_id:
            raise HTTPException(
                status_code=404,
                detail=f"Directory '{directory_path}' not found"
            )

        base_query = select(Metric).where(Metric.directory_id == directory_id)

        # Apply optional filters.
        if search:
            base_query = base_query.where(Metric.name.ilike(f"%{search}%"))
        if type:
            base_query = base_query.where(Metric.type == type)
        if component:
            base_query = base_query.where(Metric.component.ilike(f"%{component}%"))
        # analysis_status exists on Metric only in some schema versions.
        if analysis_status and hasattr(Metric, 'analysis_status'):
            base_query = base_query.where(Metric.analysis_status == analysis_status)

        # Column dispatch table replaces the previous if/elif chain;
        # unknown sort fields fall back to sorting by name.
        sort_columns = {
            "name": Metric.name,
            "type": Metric.type,
            "component": Metric.component,
        }
        if hasattr(Metric, 'created_at'):
            sort_columns["created_at"] = Metric.created_at
        order_column = sort_columns.get(sort_by, Metric.name)

        if sort_order == "desc":
            base_query = base_query.order_by(order_column.desc())
        else:
            base_query = base_query.order_by(order_column.asc())

        # Total count over the filtered (unpaginated) query.
        count_query = select(func.count()).select_from(base_query.subquery())
        total_result = await extractor_db.execute(count_query)
        total = total_result.scalar()

        # Apply pagination and fetch the page.
        offset = (page - 1) * page_size
        result = await extractor_db.execute(base_query.offset(offset).limit(page_size))
        metrics = result.scalars().all()

        def _iso(value):
            # datetime -> ISO-8601 string; None (or missing column) passes through.
            return value.isoformat() if value else None

        # Transform ORM rows into plain dicts for the response.
        metric_data = []
        for metric in metrics:
            metric_data.append({
                "id": str(metric.id),
                "name": metric.name,
                "type": metric.type,
                "component": metric.component,
                "description": getattr(metric, 'description', None),
                "analysis_status": getattr(metric, 'analysis_status', 'PENDING'),
                "analysis_progress": getattr(metric, 'analysis_progress', 0),
                "file_path": metric.file_path,
                "directory_path": directory_path,
                "created_at": _iso(getattr(metric, 'created_at', None)),
                "updated_at": _iso(getattr(metric, 'updated_at', None)),
            })

        logger.info("✅ [DirectoryAPI] Directory metrics retrieved successfully")
        logger.info(f"   Found {total} total metrics, returning {len(metric_data)}")

        return {
            "success": True,
            "data": metric_data,
            "total": total,
            "page": page,
            "page_size": page_size,
            "filters_applied": {
                "directory_path": directory_path,
                "search": search,
                "type": type,
                "component": component,
                "analysis_status": analysis_status,
                "sort_by": sort_by,
                "sort_order": sort_order
            }
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"❌ [DirectoryAPI] Failed to get directory metrics: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to get directory metrics: {str(e)}"
        )


@router.post("/batch-analyze-metrics")
async def batch_analyze_metrics(
    request: BatchAnalyzeMetricsRequest,
    extractor_db: AsyncSession = Depends(get_extractor_db),
    business_db: AsyncSession = Depends(get_business_db),
):
    """
    Batch analyze all metrics in a directory.

    Sets every metric in the directory to NEEDS_COLLECTION (when
    request.should_collect is true) or NOT_NEEDED, mirrors the status into
    telemetry_analysis, and best-effort records a row in analysis_tasks.

    Args:
        request: Batch analyze request with directory_path, should_collect, analysis_reason
        extractor_db: Extractor database session (unused; writes go through a
            direct sqlite3 connection so the whole batch shares one transaction)
        business_db: Business database session (unused, same reason)

    Returns:
        Batch analyze operation result: success flag, message,
        analysis_task_id and queued_count

    Raises:
        HTTPException: 404 if the directory does not exist,
            500 on any unexpected failure.
    """
    try:
        logger.info("🔍 [DirectoryAPI] Batch metrics analysis request received")
        logger.info(f"   Directory: {request.directory_path}")
        logger.info(f"   Should collect: {request.should_collect}")
        logger.info(f"   Analysis reason: {request.analysis_reason}")

        # sqlite3 / os / uuid come from the module-level imports; the
        # previous function-local re-imports were redundant shadows.

        # Resolve the SQLite file: prefer <backend>/data/uma_insight.db,
        # otherwise fall back to <project-root>/backend/data/uma_insight.db.
        current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))  # backend
        backend_data_dir = os.path.join(current_dir, 'data')
        db_path = os.path.join(backend_data_dir, 'uma_insight.db')
        if not os.path.exists(db_path):
            project_root = os.path.dirname(current_dir)  # project root
            db_path = os.path.join(project_root, 'backend', 'data', 'uma_insight.db')

        # Generate analysis task ID
        analysis_task_id = str(uuid.uuid4())

        # Direct SQLite connection so the whole batch is one transaction.
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()

        try:
            # Resolve directory_id from the path for precise filtering.
            cursor.execute(
                "SELECT id FROM directories WHERE path = ?",
                (request.directory_path,)
            )
            directory_result = cursor.fetchone()
            if not directory_result:
                logger.error(f"❌ [DirectoryAPI] Directory not found: {request.directory_path}")
                raise HTTPException(status_code=404, detail=f"Directory '{request.directory_path}' not found")

            directory_id = directory_result[0]

            # All metrics that live directly in this directory.
            cursor.execute(
                "SELECT id, name, analysis_status FROM metrics WHERE directory_id = ?",
                (directory_id,)
            )
            metrics_info = cursor.fetchall()

            if not metrics_info:
                logger.warning(f"⚠️ [DirectoryAPI] No metrics found in directory: {request.directory_path}")
                return {
                    "success": True,
                    "message": f"No metrics found in directory {request.directory_path}",
                    "analysis_task_id": analysis_task_id,
                    "queued_count": 0
                }

            logger.info(f"📊 [DirectoryAPI] Found {len(metrics_info)} metrics to analyze")

            # Status and notes are identical for every metric in the batch, so
            # compute them once (the old per-iteration `new_status = 'ANALYZED'`
            # default was dead code — both branches overwrote it). A single
            # timestamp also keeps the batch's rows consistent.
            new_status = 'NEEDS_COLLECTION' if request.should_collect else 'NOT_NEEDED'
            analysis_notes = json.dumps({
                'should_collect': request.should_collect,
                'analysis_reason': request.analysis_reason,
                'batch_operation': True,
                'timestamp': datetime.now().isoformat()
            })

            updated_count = 0
            for metric_id, metric_name, current_status in metrics_info:
                # Update the metric row itself.
                cursor.execute(
                    """UPDATE metrics
                       SET analysis_status = ?,
                           analysis_notes = ?,
                           analyzed_at = datetime('now')
                       WHERE id = ?""",
                    (new_status, analysis_notes, metric_id)
                )

                # Upsert the matching telemetry_analysis record.
                cursor.execute(
                    """SELECT id FROM telemetry_analysis WHERE metric_id = ?""",
                    (metric_id,)
                )
                if cursor.fetchone():
                    cursor.execute(
                        """UPDATE telemetry_analysis
                           SET analysis_status = ?,
                               analysis_notes = ?,
                               analyzed_at = datetime('now')
                           WHERE metric_id = ?""",
                        (new_status, analysis_notes, metric_id)
                    )
                else:
                    cursor.execute(
                        """INSERT INTO telemetry_analysis
                           (id, metric_id, analysis_status, analysis_notes, analyzed_at, created_at, updated_at)
                           VALUES (?, ?, ?, ?, datetime('now'), datetime('now'), datetime('now'))""",
                        (str(uuid.uuid4()), metric_id, new_status, analysis_notes)
                    )
                updated_count += 1

            # Best-effort task tracking: skip silently if the table is absent.
            try:
                cursor.execute(
                    """INSERT INTO analysis_tasks (id, status, total_metrics, created_at)
                       VALUES (?, ?, ?, CURRENT_TIMESTAMP)""",
                    (analysis_task_id, "queued", len(metrics_info))
                )
            except sqlite3.OperationalError:
                # Table doesn't exist, just skip task tracking
                pass

            conn.commit()

            logger.info("✅ [DirectoryAPI] Batch metrics analysis queued successfully")
            logger.info(f"   Task ID: {analysis_task_id}")
            logger.info(f"   Queued {updated_count} metrics for analysis")

            return {
                "success": True,
                "message": f"Successfully queued batch analysis for {len(metrics_info)} metrics in directory {request.directory_path}",
                "analysis_task_id": analysis_task_id,
                "queued_count": len(metrics_info)
            }

        finally:
            conn.close()

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"❌ [DirectoryAPI] Failed to queue batch metrics analysis: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to queue batch metrics analysis: {str(e)}"
        )


@router.get("/{directory_id}/team-distribution")
async def get_directory_team_distribution(
    directory_id: int,
    extractor_db: AsyncSession = Depends(get_extractor_db),
    business_db: AsyncSession = Depends(get_business_db)
):
    """
    Get detailed team distribution for a specific directory.

    Aggregates per-team metric counts (split into UMA histograms and UKM
    events) for every histogram/ukm_event metric in the directory.

    Args:
        directory_id: Directory ID to get team distribution for
        extractor_db: Extractor database session
        business_db: Business database session

    Returns:
        Team distribution data with UMA/UKM breakdown

    Raises:
        HTTPException: 404 when the directory does not exist,
            500 on any unexpected failure.
    """
    try:
        logger.info(f"🔍 [TeamDistribution] Getting team distribution for directory ID: {directory_id}")

        # Look up the directory record in the business database.
        directory = (
            await business_db.execute(select(Directory).where(Directory.id == directory_id))
        ).scalar_one_or_none()

        if directory is None:
            raise HTTPException(
                status_code=404,
                detail=f"Directory with ID {directory_id} not found"
            )

        # Only histogram and ukm_event metrics participate in the breakdown.
        metric_rows = (
            await extractor_db.execute(
                select(Metric.id, Metric.type, Metric.name).where(
                    and_(
                        Metric.directory_id == directory_id,
                        Metric.type.in_(['histogram', 'ukm_event'])
                    )
                )
            )
        ).fetchall()

        if not metric_rows:
            logger.info(f"📂 [TeamDistribution] No metrics found for directory {directory.path}")
            return {
                "success": True,
                "data": {
                    "directory_path": directory.path,
                    "directory_name": directory.name,
                    "total_metrics": 0,
                    "teams": [],
                    "unassigned_count": 0,
                    "uma_summary": {"total": 0, "teams": {}},
                    "ukm_summary": {"total": 0, "teams": {}}
                }
            }

        # id -> type lookup drives the UMA/UKM categorisation below.
        type_by_id = {row.id: row.type for row in metric_rows}
        # id -> name lookup kept for parity with earlier revisions; not read here.
        name_by_id = {row.id: row.name for row in metric_rows}

        # Pull team assignments for every metric in the directory.
        assignments = (
            await business_db.execute(
                select(
                    MetricTeamAssignment.metric_id,
                    MetricTeamAssignment.team_name
                ).where(MetricTeamAssignment.metric_id.in_(list(type_by_id)))
            )
        ).fetchall()

        teams_by_name = {}
        seen_metric_ids = set()

        for metric_id, team_name in assignments:
            mtype = type_by_id.get(metric_id, "unknown")
            seen_metric_ids.add(metric_id)

            bucket = teams_by_name.setdefault(team_name, {
                "team_name": team_name,
                "total_count": 0,
                "uma_metrics": 0,  # histograms only (enum types excluded as per new rule)
                "ukm_metrics": 0,  # ukm_events + ukm_metrics
                "metric_types": {"histogram": 0, "enum": 0, "ukm_event": 0, "ukm_metric": 0, "unknown": 0},
                "assignment_confidence": 0,
                "last_assigned": None
            })
            bucket["total_count"] += 1

            # UMA vs UKM split (enum types excluded as per new rule).
            if mtype == "histogram":
                bucket["uma_metrics"] += 1
            elif mtype in ("ukm_event", "ukm_metric"):
                bucket["ukm_metrics"] += 1

            # Per-type counter; anything unrecognised falls into "unknown".
            type_key = mtype if mtype in bucket["metric_types"] else "unknown"
            bucket["metric_types"][type_key] += 1

        # Metrics with no assignment row at all.
        unassigned_count = len(metric_rows) - len(seen_metric_ids)

        # Largest teams first.
        teams_list = sorted(teams_by_name.values(), key=lambda t: t["total_count"], reverse=True)

        # Roll up per-team counts into the UMA / UKM summaries.
        uma_summary = {"total": sum(t["uma_metrics"] for t in teams_list), "teams": {}}
        ukm_summary = {"total": sum(t["ukm_metrics"] for t in teams_list), "teams": {}}
        for team in teams_list:
            if team["uma_metrics"] > 0:
                uma_summary["teams"][team["team_name"]] = team["uma_metrics"]
            if team["ukm_metrics"] > 0:
                ukm_summary["teams"][team["team_name"]] = team["ukm_metrics"]

        result_data = {
            "directory_path": directory.path,
            "directory_name": directory.name,
            "total_metrics": len(metric_rows),
            "teams": teams_list,
            "unassigned_count": unassigned_count,
            "uma_summary": uma_summary,
            "ukm_summary": ukm_summary
        }

        logger.info(f"✅ [TeamDistribution] Retrieved team distribution for {directory.path}: "
                   f"{len(teams_list)} teams, {len(metric_rows)} total metrics, {unassigned_count} unassigned")

        return {
            "success": True,
            "data": result_data
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"❌ [TeamDistribution] Failed to get team distribution for directory {directory_id}: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to get team distribution: {str(e)}"
        )