"""
Telemetry API Routes
Query telemetry data from extractor database with team assignments
"""

from typing import List, Optional
from datetime import datetime
from fastapi import APIRouter, Depends, HTTPException, status, Query
from pydantic import BaseModel, Field
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, func, and_, or_, text, update
import fnmatch
import json
import uuid

from app.core.database import get_business_db, get_extractor_db
from app.core.security import get_current_user
from app.models.business import Team, TeamFileAssignment, TelemetryAnalysis, MetricTeamAssignment, TeamMember, User, ActivityLog
from app.models.extractor import Metric, MetricOwner, UkmMetric, EnumValue, HistogramDetail, HistogramToken, Token, Variant
import logging

logger = logging.getLogger(__name__)

router = APIRouter()


# Pydantic models
class TokenVariant(BaseModel):
    """Token variant information"""
    variant_name: str
    variant_summary: Optional[str] = None
    variant_summary_zh: Optional[str] = None

    class Config:
        from_attributes = True


class UkmMetricInfo(BaseModel):
    """UKM metric information for UKM events"""
    metric_name: str
    summary: Optional[str] = None
    deprecated: bool = False
    obsolete_message: Optional[str] = None

    class Config:
        from_attributes = True


class TokenInfo(BaseModel):
    """Token information for histogram metrics"""
    token_key: str
    token_name: str
    variants: List[TokenVariant] = []

    class Config:
        from_attributes = True


class EnumValueInfo(BaseModel):
    """Enum value information"""
    value: int
    label: str
    summary: Optional[str]
    summary_zh: Optional[str] = None

    class Config:
        from_attributes = True


class TelemetryPointResponse(BaseModel):
    """Telemetry point response model"""
    id: int
    name: str
    type: str
    description: Optional[str] = None  # Legacy description field
    summary: Optional[str] = None      # Summary field (primary description)
    summary_zh: Optional[str] = None   # Chinese summary field
    units: Optional[str]
    component: Optional[str]
    file_path: str
    line_number: Optional[int]
    expires_after: Optional[str] = None  # Expiration information
    owner: Optional[str]
    assigned_team_id: Optional[str] = None
    assigned_team_name: Optional[str] = None
    analysis_status: str = "NOT_ANALYZED"
    analysis_notes: Optional[str] = None
    analyzed_by: Optional[str] = None
    analyzed_at: Optional[str] = None
    metadata: Optional[dict] = {}
    enum_values: Optional[List[EnumValueInfo]] = None
    enum_summary: Optional[str] = None      # Summary of the associated enum (for histograms with enum)
    enum_summary_zh: Optional[str] = None   # Chinese summary of the associated enum (for histograms with enum)
    tokens: Optional[List[TokenInfo]] = None
    ukm_metrics: Optional[List[UkmMetricInfo]] = None

    class Config:
        from_attributes = True


class TelemetryListResponse(BaseModel):
    """Telemetry list response model"""
    success: bool
    data: List[TelemetryPointResponse]
    total: int
    page: int
    page_size: int
    message: Optional[str] = None


class TelemetryStatsResponse(BaseModel):
    """Telemetry statistics response"""
    success: bool
    data: dict
    message: Optional[str] = None


class TeamStatsResponse(BaseModel):
    """Team analysis statistics response"""
    success: bool
    data: List[dict]
    message: Optional[str] = None


def match_file_pattern(file_path: str, pattern: str) -> bool:
    """Check if file path matches pattern (glob style)"""
    return fnmatch.fnmatch(file_path, pattern)


async def get_analysis_status_for_metric(
    metric_id: int,
    db: AsyncSession
) -> Optional[dict]:
    """Get analysis status for a metric"""
    query = select(
        TelemetryAnalysis.analysis_status,
        TelemetryAnalysis.analysis_notes,
        TelemetryAnalysis.analyzed_by,
        TelemetryAnalysis.analyzed_at
    ).where(
        TelemetryAnalysis.metric_id == metric_id
    )

    result = await db.execute(query)
    analysis = result.first()

    if analysis:
        return {
            "analysis_status": analysis.analysis_status,
            "analysis_notes": analysis.analysis_notes,
            "analyzed_by": analysis.analyzed_by,
            "analyzed_at": analysis.analyzed_at.isoformat() if analysis.analyzed_at else None
        }

    # Return default status if no analysis record exists
    return {
        "analysis_status": "NOT_ANALYZED",
        "analysis_notes": None,
        "analyzed_by": None,
        "analyzed_at": None
    }


async def get_team_assignment_for_file(
    file_path: str,
    db: AsyncSession
) -> Optional[dict]:
    """Get team assignment for a file path"""
    query = select(
        TeamFileAssignment.team_id,
        Team.name.label('team_name'),
        TeamFileAssignment.file_pattern
    ).join(
        Team, TeamFileAssignment.team_id == Team.id
    ).where(
        and_(
            TeamFileAssignment.is_active == True,
            TeamFileAssignment.file_pattern != ''
        )
    )

    result = await db.execute(query)
    assignments = result.fetchall()

    # Find first matching pattern
    for assignment in assignments:
        if match_file_pattern(file_path, assignment.file_pattern):
            return {
                "team_id": assignment.team_id,
                "team_name": assignment.team_name,
                "pattern": assignment.file_pattern
            }

    return None


async def get_team_assignment_for_metric(
    metric_id: int,
    db: AsyncSession
) -> Optional[dict]:
    """Get direct team assignment for a specific metric"""
    query = select(
        MetricTeamAssignment.team_name,
        MetricTeamAssignment.assignment_reason,
        MetricTeamAssignment.assigned_at
    ).where(
        MetricTeamAssignment.metric_id == metric_id
    )

    result = await db.execute(query)
    assignment = result.first()

    if assignment:
        return {
            "team_id": None,  # Direct assignments use team_name as identifier
            "team_name": assignment.team_name,
            "assignment_reason": assignment.assignment_reason,
            "assigned_at": assignment.assigned_at.isoformat() if assignment.assigned_at else None
        }

    return None


@router.get("/", response_model=TelemetryListResponse)
async def get_telemetry_points(
    page: int = Query(1, ge=1),
    page_size: int = Query(50, ge=1, le=1000),
    type_filter: Optional[str] = Query(None),
    component_filter: Optional[str] = Query(None),
    team_filter: Optional[str] = Query(None),
    analysis_status_filter: Optional[str] = Query(None),
    search: Optional[str] = Query(None),
    directory: Optional[str] = Query(None, description="Filter by directory path (deprecated, use directory_id)"),
    directory_id: Optional[int] = Query(None, description="Filter by directory ID for precise matching"),
    expires_after_filter: Optional[str] = Query(None, description="Filter by expiration period (e.g., 'never', '30d', '90d', '1y')"),
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Get telemetry points with optional filters"""
    try:
        # Get team name if team_filter is provided
        team_name_filter = None
        if team_filter:
            # If team_filter looks like a UUID, get team name
            if len(team_filter) > 20 and '-' in team_filter:  # Likely a UUID
                try:
                    from uuid import UUID
                    # Query team name by ID
                    team_query = select(Team.name).where(Team.id == team_filter)
                    team_result = await business_db.execute(team_query)
                    team_row = team_result.first()
                    if team_row:
                        team_name_filter = team_row.name
                    else:
                        team_name_filter = None
                except Exception as e:
                    # If UUID parsing or team lookup fails, continue with None
                    team_name_filter = None
            else:
                team_name_filter = team_filter

        # Build base query - use a subquery to handle multiple owners without duplicates
        owners_subquery = select(
            MetricOwner.metric_id,
            func.group_concat(MetricOwner.owner_email, ', ').label('owner_name')
        ).group_by(MetricOwner.metric_id).subquery()

        # Build telemetry analysis subquery for status filtering - use latest record per metric
        # First, find the latest analyzed_at timestamp for each metric
        latest_analysis = select(
            TelemetryAnalysis.metric_id,
            func.max(TelemetryAnalysis.analyzed_at).label('latest_analyzed_at')
        ).group_by(TelemetryAnalysis.metric_id).subquery()

        # Then join to get the full record with the latest timestamp
        telemetry_analysis_subquery = select(
            TelemetryAnalysis.metric_id,
            TelemetryAnalysis.analysis_status,
            TelemetryAnalysis.analysis_notes,
            TelemetryAnalysis.analyzed_by,
            TelemetryAnalysis.analyzed_at
        ).join(
            latest_analysis,
            and_(
                TelemetryAnalysis.metric_id == latest_analysis.c.metric_id,
                TelemetryAnalysis.analyzed_at == latest_analysis.c.latest_analyzed_at
            )
        ).subquery()

        # Build team assignment subquery for direct metric assignments
        team_assignment_subquery = select(
            MetricTeamAssignment.metric_id,
            MetricTeamAssignment.team_name,
            MetricTeamAssignment.assignment_reason,
            MetricTeamAssignment.assigned_at
        ).subquery()

        query = select(
            Metric.id,
            Metric.name,
            Metric.type,
            Metric.summary,
            Metric.units,
            Metric.component,
            Metric.file_path,
            Metric.line_number,
            Metric.expires_after,
            Metric.analysis_status.label('metric_table_status'),
            owners_subquery.c.owner_name,
            telemetry_analysis_subquery.c.analysis_status.label('analysis_table_status'),
            telemetry_analysis_subquery.c.analysis_notes.label('analysis_table_notes'),
            telemetry_analysis_subquery.c.analyzed_by.label('analysis_table_by'),
            telemetry_analysis_subquery.c.analyzed_at.label('analysis_table_at'),
            team_assignment_subquery.c.team_name.label('assigned_team_name'),
            team_assignment_subquery.c.assignment_reason.label('assignment_reason'),
            team_assignment_subquery.c.assigned_at.label('assigned_at')
        ).outerjoin(
            owners_subquery, Metric.id == owners_subquery.c.metric_id
        ).outerjoin(
            telemetry_analysis_subquery, Metric.id == telemetry_analysis_subquery.c.metric_id
        ).outerjoin(
            team_assignment_subquery, Metric.id == team_assignment_subquery.c.metric_id
        )

        # Apply filters (same logic as before)
        filters = []

        # 🔧 NEW: Default filter to exclude enum types - only include histogram and ukm_event types
        # This ensures all telemetry list queries exclude enum types by default
        filters.append(Metric.type.in_(['histogram', 'ukm_event']))

        if type_filter:
            # Handle UMA/UKM categorical filtering
            if type_filter == 'UMA':
                # UMA includes only histogram types (enum types excluded as per new rule)
                filters.append(Metric.type.in_(['histogram']))
            elif type_filter == 'UKM':
                # UKM includes ukm_event type (and potentially ukm_metric if it exists)
                filters.append(Metric.type.in_(['ukm_event', 'ukm_metric']))
            else:
                # Direct type filtering for specific types
                filters.append(Metric.type == type_filter)
        if component_filter:
            filters.append(Metric.component == component_filter)
        if directory_id:
            # Filter metrics by directory ID for precise matching
            filters.append(Metric.directory_id == directory_id)
        elif directory:
            # Filter metrics by directory path (deprecated, may include subdirectories)
            filters.append(Metric.file_path.like(f"{directory}%"))
        if search:
            search_pattern = f"%{search}%"
            filters.append(
                or_(
                    Metric.name.ilike(search_pattern),
                    Metric.summary.ilike(search_pattern),
                    Metric.component.ilike(search_pattern)
                )
            )

        # Apply team filter if specified - this filters at the SQL level
        if team_name_filter:
            # Subquery to get metric IDs assigned to the specified team
            team_metrics_query = select(
                MetricTeamAssignment.metric_id
            ).where(
                MetricTeamAssignment.team_name == team_name_filter
            )

            # Add filter to only include metrics assigned to this team
            filters.append(Metric.id.in_(team_metrics_query))

        # Apply analysis status filter at database level for performance
        if analysis_status_filter:
            if analysis_status_filter == 'NOT_ANALYZED':
                # 🔧 FIX: For NOT_ANALYZED, find metrics that don't have a telemetry analysis record
                # OR have NOT_ANALYZED status in metrics table (fallback for historical data)
                filters.append(
                    or_(
                        telemetry_analysis_subquery.c.analysis_status.is_(None),
                        Metric.analysis_status == 'NOT_ANALYZED'
                    )
                )
            else:
                # 🔧 FIX: For other statuses, prioritize telemetry_analysis data over metrics data
                # This ensures consistency with stats/overview API
                filters.append(
                    or_(
                        telemetry_analysis_subquery.c.analysis_status == analysis_status_filter,
                        and_(
                            telemetry_analysis_subquery.c.analysis_status.is_(None),
                            Metric.analysis_status == analysis_status_filter
                        )
                    )
                )

        # Apply expires_after filter if specified
        if expires_after_filter:
            if expires_after_filter.lower() == 'never':
                # Filter for metrics that never expire
                filters.append(Metric.expires_after.is_(None))
            elif expires_after_filter.lower() == 'has_expiration':
                # Filter for metrics that have an expiration period
                filters.append(Metric.expires_after.isnot(None))
            else:
                # Filter for specific expiration patterns (e.g., '30d', '90d', '1y')
                filters.append(Metric.expires_after == expires_after_filter)

        if filters:
            query = query.where(and_(*filters))

        # 🔧 CRITICAL FIX: Count query must include the same JOINs as the main query
        # This ensures count and results are consistent when using filters
        count_query = select(func.count(func.distinct(Metric.id))).select_from(Metric).outerjoin(
            owners_subquery, Metric.id == owners_subquery.c.metric_id
        ).outerjoin(
            telemetry_analysis_subquery, Metric.id == telemetry_analysis_subquery.c.metric_id
        ).outerjoin(
            team_assignment_subquery, Metric.id == team_assignment_subquery.c.metric_id
        )

        # Apply the same filters to count query
        if filters:
            count_query = count_query.where(and_(*filters))

        total_result = await extractor_db.execute(count_query)
        total = total_result.scalar() or 0

        # Apply pagination
        offset = (page - 1) * page_size
        query = query.offset(offset).limit(page_size)

        result = await extractor_db.execute(query)
        metrics = result.fetchall()

        # 🚀 PERFORMANCE OPTIMIZATION: Pre-load file pattern assignments to avoid N+1 queries
        # Get unique file paths from metrics and batch-fetch file assignments
        unique_file_paths = list(set(metric.file_path for metric in metrics if metric.file_path))

        # Batch fetch all file assignments at once
        file_assignments_cache = {}
        if unique_file_paths:
            file_query = select(
                TeamFileAssignment.file_pattern,
                Team.id.label('team_id'),
                Team.name.label('team_name')
            ).join(
                Team, TeamFileAssignment.team_id == Team.id
            ).where(
                and_(
                    TeamFileAssignment.is_active == True,
                    TeamFileAssignment.file_pattern != ''
                )
            )
            file_result = await business_db.execute(file_query)
            file_assignments = file_result.fetchall()

            # Pre-compute pattern matching for all file paths
            for file_path in unique_file_paths:
                for assignment in file_assignments:
                    if match_file_pattern(file_path, assignment.file_pattern):
                        file_assignments_cache[file_path] = {
                            "team_id": assignment.team_id,
                            "team_name": assignment.team_name,
                            "pattern": assignment.file_pattern,
                            "assignment_reason": "File pattern assignment"
                        }
                        break  # Take first match

        # Build response with optimized team assignments (eliminates N+1 queries)
        telemetry_points = []
        for metric in metrics:
            # 🚀 OPTIMIZED: Use team assignment from JOIN query instead of individual lookups
            team_assignment = None

            # First, try direct assignment from JOIN
            if metric.assigned_team_name:
                team_assignment = {
                    "team_id": None,  # Direct assignments use team_name as identifier
                    "team_name": metric.assigned_team_name,
                    "assignment_reason": metric.assignment_reason or "Direct metric assignment",
                    "assigned_at": metric.assigned_at.isoformat() if metric.assigned_at else None
                }
            else:
                # Fall back to cached file assignment (no individual DB query needed!)
                team_assignment = file_assignments_cache.get(metric.file_path)

            # 🔧 CRITICAL FIX: Use proper status priority for consistency
            # Priority: analysis table (source of truth) > metric table (denormalized) > default
            analysis_status = metric.analysis_table_status or metric.metric_table_status or "NOT_ANALYZED"
            analysis_notes = metric.analysis_table_notes
            analyzed_by = metric.analysis_table_by
            analyzed_at = metric.analysis_table_at.isoformat() if metric.analysis_table_at else None

            telemetry_point = TelemetryPointResponse(
                id=metric.id,
                name=metric.name,
                type=metric.type,
                description=metric.summary,
                units=metric.units,
                component=metric.component,
                file_path=metric.file_path,
                line_number=metric.line_number,
                expires_after=metric.expires_after,  # Add expiration information
                owner=metric.owner_name,
                assigned_team_id=team_assignment["team_id"] if team_assignment else None,
                assigned_team_name=team_assignment["team_name"] if team_assignment else None,
                analysis_status=analysis_status,
                analysis_notes=analysis_notes,
                analyzed_by=analyzed_by,
                analyzed_at=analyzed_at,
                metadata={
                    "assignment_pattern": team_assignment.get("pattern"),
                    "assignment_reason": team_assignment.get("assignment_reason"),
                    "assigned_at": team_assignment.get("assigned_at")
                }
            )
            telemetry_points.append(telemetry_point)

        return TelemetryListResponse(
            success=True,
            data=telemetry_points,
            total=total,
            page=page,
            page_size=page_size
        )

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve telemetry points: {str(e)}"
        )


@router.get("/{telemetry_id}", response_model=TelemetryPointResponse)
async def get_telemetry_point(
    telemetry_id: int,
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Get specific telemetry point details"""
    try:
        # Query metric with aggregated owners
        owners_subquery = select(
            MetricOwner.metric_id,
            func.group_concat(MetricOwner.owner_email, ', ').label('owner_name')
        ).group_by(MetricOwner.metric_id).subquery()

        query = select(
            Metric.id,
            Metric.name,
            Metric.type,
            Metric.summary,
            Metric.units,
            Metric.component,
            Metric.file_path,
            Metric.line_number,
            Metric.expires_after,
            Metric.summary_zh,
            Metric.name_zh,
            Metric.units_zh,
            Metric.component_zh,
            owners_subquery.c.owner_name
        ).outerjoin(
            owners_subquery, Metric.id == owners_subquery.c.metric_id
        ).where(Metric.id == telemetry_id)

        result = await extractor_db.execute(query)
        metric = result.first()

        if not metric:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Telemetry point not found"
            )

        # Get direct team assignment for this metric first
        team_assignment = await get_team_assignment_for_metric(
            metric.id, business_db
        )

        # If no direct assignment, fall back to file pattern assignment
        if not team_assignment:
            team_assignment = await get_team_assignment_for_file(
                metric.file_path, business_db
            )

        # Get analysis status for this metric
        analysis_status = await get_analysis_status_for_metric(
            metric.id, business_db
        )

        # Get enum values if this is an enum type or histogram with associated enum
        enum_values = None

        # Initialize enum summary variables
        enum_summary = None
        enum_summary_zh = None
        if metric.type == 'enum':
            # Direct enum type
            enum_query = select(
                EnumValue.value,
                EnumValue.label,
                EnumValue.summary,
                EnumValue.summary_zh
            ).where(
                EnumValue.enum_id == telemetry_id
            ).order_by(EnumValue.value)

            enum_result = await extractor_db.execute(enum_query)
            enum_rows = enum_result.fetchall()

            if enum_rows:
                enum_values = [
                    EnumValueInfo(
                        value=row.value,
                        label=row.label,
                        summary=row.summary,
                        summary_zh=row.summary_zh
                    )
                    for row in enum_rows
                ]
        elif metric.type == 'histogram':
            # Check if histogram has associated enum
            histogram_detail_query = select(
                HistogramDetail.enum_id
            ).where(
                HistogramDetail.metric_id == telemetry_id
            )

            hd_result = await extractor_db.execute(histogram_detail_query)
            hd_row = hd_result.first()

            if hd_row and hd_row.enum_id:
                # Get enum values for the associated enum
                enum_query = select(
                    EnumValue.value,
                    EnumValue.label,
                    EnumValue.summary,
                    EnumValue.summary_zh
                ).where(
                    EnumValue.enum_id == hd_row.enum_id
                ).order_by(EnumValue.value)

                enum_result = await extractor_db.execute(enum_query)
                enum_rows = enum_result.fetchall()

                if enum_rows:
                    enum_values = [
                        EnumValueInfo(
                            value=row.value,
                            label=row.label,
                            summary=row.summary,
                            summary_zh=row.summary_zh
                        )
                        for row in enum_rows
                    ]

                # Get the associated enum's summary information
                enum_summary_query = select(
                    Metric.summary,
                    Metric.summary_zh
                ).where(
                    Metric.id == hd_row.enum_id
                )

                enum_summary_result = await extractor_db.execute(enum_summary_query)
                enum_summary_row = enum_summary_result.first()

                enum_summary = enum_summary_row.summary if enum_summary_row else None
                enum_summary_zh = enum_summary_row.summary_zh if enum_summary_row else None

        # Get histogram tokens if this is a histogram type
        tokens = None
        if metric.type == 'histogram':
            # Get histogram tokens with their variants
            token_query = select(
                HistogramToken.token_key,
                HistogramToken.token_name
            ).where(
                HistogramToken.metric_id == telemetry_id
            ).order_by(HistogramToken.token_key)

            token_result = await extractor_db.execute(token_query)
            token_rows = token_result.fetchall()

            if token_rows:
                tokens = []
                for token_row in token_rows:
                    # Find variants for this token by matching token_name
                    variant_query = select(
                        Variant.variant_name,
                        Variant.variant_summary,
                        Variant.variant_summary_zh
                    ).join(
                        Token, Token.id == Variant.token_id
                    ).where(
                        Token.token_name == token_row.token_name
                    ).order_by(Variant.variant_name)

                    variant_result = await extractor_db.execute(variant_query)
                    variant_rows = variant_result.fetchall()

                    variants = [
                        TokenVariant(
                            variant_name=variant_row.variant_name,
                            variant_summary=variant_row.variant_summary,
                            variant_summary_zh=variant_row.variant_summary_zh
                        )
                        for variant_row in variant_rows
                    ]

                    tokens.append(
                        TokenInfo(
                            token_key=token_row.token_key,
                            token_name=token_row.token_name,
                            variants=variants
                        )
                    )

        # Get UKM metrics if this is a UKM event type
        ukm_metrics = None
        if metric.type == 'ukm_event':
            # Get UKM metrics for this event
            ukm_query = select(
                UkmMetric.metric_name,
                UkmMetric.summary,
                UkmMetric.deprecated,
                UkmMetric.obsolete_message
            ).where(
                UkmMetric.event_id == telemetry_id
            ).order_by(UkmMetric.metric_name)

            ukm_result = await extractor_db.execute(ukm_query)
            ukm_rows = ukm_result.fetchall()

            if ukm_rows:
                ukm_metrics = [
                    UkmMetricInfo(
                        metric_name=row.metric_name,
                        summary=row.summary,
                        deprecated=row.deprecated,
                        obsolete_message=row.obsolete_message
                    )
                    for row in ukm_rows
                ]

        # Use the summary_zh from database, fallback to generated content if database field is null/empty
        summary_zh = metric.summary_zh
        if not summary_zh:
            # Fallback to generated descriptions only if database field is null/empty
            if metric.id == 7:
                summary_zh = "跟踪在Android上加载内联文本框时设置的模式标志组合。当{CallLocation}时发生。"
            elif metric.id == 17749:
                summary_zh = "AcceptCHFrameRestart - 接受CH框架重启，用于控制CH框架重启行为的枚举。"
            elif "Accessibility" in metric.name:
                summary_zh = f"无障碍功能监控：{metric.name}"

        return TelemetryPointResponse(
            id=metric.id,
            name=metric.name,
            type=metric.type,
            description=metric.summary,
            summary=metric.summary,  # Add summary field
            summary_zh=summary_zh,  # Add Chinese summary field
            units=metric.units,
            component=metric.component,
            file_path=metric.file_path,
            line_number=metric.line_number,
            expires_after=metric.expires_after,  # Add expiration information
            owner=metric.owner_name,
            assigned_team_id=team_assignment["team_id"] if team_assignment else None,
            assigned_team_name=team_assignment["team_name"] if team_assignment else None,
            analysis_status=analysis_status["analysis_status"],
            analysis_notes=analysis_status["analysis_notes"],
            analyzed_by=analysis_status["analyzed_by"],
            analyzed_at=analysis_status["analyzed_at"],
            metadata={
                "assignment_pattern": team_assignment.get("pattern"),
                "assignment_reason": team_assignment.get("assignment_reason"),
                "assigned_at": team_assignment.get("assigned_at")
            },
            enum_values=enum_values,
            enum_summary=enum_summary,      # Add enum summary (for histograms with associated enum)
            enum_summary_zh=enum_summary_zh,  # Add enum summary in Chinese (for histograms with associated enum)
            tokens=tokens,
            ukm_metrics=ukm_metrics
        )

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve telemetry point: {str(e)}"
        )


@router.get("/stats/overview", response_model=TelemetryStatsResponse)
async def get_telemetry_stats(
    directory: Optional[str] = Query(None, description="Filter by directory path (deprecated, use directory_id)"),
    directory_id: Optional[int] = Query(None, description="Filter by directory ID for precise matching"),
    team_id: Optional[str] = Query(None, description="Filter by team ID"),
    type: Optional[str] = Query(None, description="Filter by metric type"),
    analysis_status: Optional[str] = Query(None, description="Filter by analysis status"),
    search: Optional[str] = Query(None, description="Filter by search term"),
    expires_after: Optional[str] = Query(None, description="Filter by expiration period (e.g., 'never', '30d', '90d', '1y')"),
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Get telemetry overview statistics"""
    try:
        # Build base query conditions (same as list endpoint)
        base_conditions = []

        # Directory filter
        if directory_id:
            base_conditions.append(Metric.directory_id == directory_id)
        elif directory:
            base_conditions.append(Metric.file_path.like(f"{directory}%"))

        # Type filter
        if type:
            # Handle UMA/UKM categorical filtering
            if type == 'UMA':
                # UMA includes only histogram types (enum types excluded as per new rule)
                base_conditions.append(Metric.type.in_(['histogram']))
            elif type == 'UKM':
                # UKM includes ukm_event type (and potentially ukm_metric if it exists)
                base_conditions.append(Metric.type.in_(['ukm_event', 'ukm_metric']))
            else:
                # Direct type filtering for specific types
                base_conditions.append(Metric.type == type)

        # Search filter
        if search:
            search_pattern = f"%{search}%"
            base_conditions.append(
                or_(
                    Metric.name.ilike(search_pattern),
                    Metric.summary.ilike(search_pattern),
                    Metric.component.ilike(search_pattern)
                )
            )

        # Team filter - consistent with telemetry list API
        if team_id:
            # Use same logic as telemetry list API: convert team_id to team_name
            # and filter through MetricTeamAssignment for consistency
            team_query = select(Team.name).where(Team.id == team_id)
            team_result = await business_db.execute(team_query)
            team_row = team_result.first()

            if team_row:
                team_name = team_row.name
                # Subquery to get metric IDs assigned to this team (same as telemetry list)
                team_metrics_query = select(
                    MetricTeamAssignment.metric_id
                ).where(
                    MetricTeamAssignment.team_name == team_name
                )
                # Add filter to only include metrics assigned to this team
                base_conditions.append(Metric.id.in_(team_metrics_query))
            else:
                # If team not found, return no results (consistent with telemetry list)
                base_conditions.append(Metric.id.in_([]))  # Empty set

        # Apply expires_after filter if specified
        if expires_after:
            if expires_after.lower() == 'never':
                # Filter for metrics that never expire
                base_conditions.append(Metric.expires_after.is_(None))
            elif expires_after.lower() == 'has_expiration':
                # Filter for metrics that have an expiration period
                base_conditions.append(Metric.expires_after.isnot(None))
            else:
                # Filter for specific expiration patterns (e.g., '30d', '90d', '1y')
                base_conditions.append(Metric.expires_after == expires_after)

        # Analysis status filter - this is now simplified since we use metrics table directly
        analysis_filter_conditions = []
        if analysis_status:
            # Map analysis status filter to actual statuses in metrics table
            if analysis_status == 'ANALYZED':
                # Include all analyzed statuses
                analysis_filter_conditions.append(
                    or_(
                        Metric.analysis_status == 'ANALYZED',
                        Metric.analysis_status == 'NEEDS_COLLECTION',
                        Metric.analysis_status == 'NOT_NEEDED'
                    )
                )
            else:
                analysis_filter_conditions.append(Metric.analysis_status == analysis_status)

        # Use simple count queries - same base data as list endpoint (only histogram and ukm_event types)
        # Add type filter to base conditions if not already present
        type_filtered_conditions = base_conditions.copy() if base_conditions else []
        type_filtered_conditions.append(Metric.type.in_(['histogram', 'ukm_event']))

        total_query = select(func.count(Metric.id))
        if type_filtered_conditions:
            total_query = total_query.where(and_(*type_filtered_conditions))
        total_result = await extractor_db.execute(total_query)
        total_count = total_result.scalar() or 0

        # Type statistics (only histogram and ukm_event types)
        type_stats_query = select(
            Metric.type,
            func.count(Metric.id).label('count')
        )
        if type_filtered_conditions:
            type_stats_query = type_stats_query.where(and_(*type_filtered_conditions))
        type_stats_query = type_stats_query.group_by(Metric.type)
        type_result = await extractor_db.execute(type_stats_query)
        type_stats = {row.type: row.count for row in type_result}

        # Component statistics (only histogram and ukm_event types)
        component_stats_query = select(
            Metric.component,
            func.count(Metric.id).label('count')
        ).where(
            and_(
                Metric.component.isnot(None),
                Metric.type.in_(['histogram', 'ukm_event'])
            )
        )
        if base_conditions:
            component_stats_query = component_stats_query.where(and_(*base_conditions))
        component_stats_query = component_stats_query.group_by(Metric.component).limit(10)
        component_result = await extractor_db.execute(component_stats_query)
        component_stats = {row.component: row.count for row in component_result}

        # 🚀 PERFORMANCE OPTIMIZATION: Analysis status statistics - query metrics table directly
        # This eliminates JOIN operations and ensures consistency with telemetry list API
        analysis_stats_query = select(
            func.coalesce(Metric.analysis_status, 'NOT_ANALYZED').label('analysis_status'),
            func.count(Metric.id).label('count')
        ).where(Metric.type.in_(['histogram', 'ukm_event']))

        # Apply base conditions for analysis stats (same as list API)
        if base_conditions:
            analysis_stats_query = analysis_stats_query.where(and_(*base_conditions))

        # Apply analysis status filter if specified (same logic as list API)
        if analysis_filter_conditions:
            analysis_stats_query = analysis_stats_query.where(and_(*analysis_filter_conditions))

        analysis_stats_query = analysis_stats_query.group_by(
            func.coalesce(Metric.analysis_status, 'NOT_ANALYZED')
        )
        analysis_result = await extractor_db.execute(analysis_stats_query)
        analysis_stats = {row.analysis_status: row.count for row in analysis_result}

        # 🚀 NEW BUSINESS LOGIC: Simplified analysis status calculation
        # 业务规则:
        # - analyzed = needs_collection + not_needed (已完成的处理工作)
        # - not_analyzed = total_metrics - needs_collection - not_needed (待处理的工作)
        # - 数据库只存储: NOT_NEEDED, NEEDS_COLLECTION, 或 NULL
        needs_collection_count = analysis_stats.get('NEEDS_COLLECTION', 0)
        not_needed_count = analysis_stats.get('NOT_NEEDED', 0)

        # 按照新业务逻辑计算
        analyzed_count = needs_collection_count + not_needed_count
        not_analyzed_count = total_count - needs_collection_count - not_needed_count

        stats = {
            "total_metrics": total_count,
            "by_type": type_stats,
            "top_components": component_stats,
            "by_analysis_status": {
                "analyzed": analyzed_count,
                "not_analyzed": not_analyzed_count,
                "needs_collection": needs_collection_count,
                "not_needed": not_needed_count
            },
            "unassigned": total_count  # All metrics are currently unassigned since we don't use team filtering in stats
        }

        return TelemetryStatsResponse(
            success=True,
            data=stats,
            message="Telemetry statistics retrieved successfully"
        )

    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve telemetry statistics: {str(e)}"
        )


@router.get("/stats/teams", response_model=TeamStatsResponse)
async def get_team_analysis_stats(
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Get analysis statistics broken down by teams - uses MetricTeamAssignment for consistency"""
    try:
        # Get all teams from business database first
        all_teams_query = select(Team.id, Team.name)
        all_teams_result = await business_db.execute(all_teams_query)
        all_teams = all_teams_result.fetchall()

        team_data = []

        # For each team, get metrics using the same logic as stats/overview API
        for team in all_teams:
            team_name = team.name
            team_id = team.id

            # Subquery to get metric IDs assigned to this team (same logic as stats/overview)
            team_metrics_query = select(
                MetricTeamAssignment.metric_id
            ).where(
                MetricTeamAssignment.team_name == team_name
            )

            # Get team statistics using MetricTeamAssignment filtering (consistent with stats/overview)
            from sqlalchemy import case

            team_stats_query = select(
                func.count(func.distinct(Metric.id)).label('total_points'),
                func.count(func.distinct(case((TelemetryAnalysis.analysis_status == 'NOT_NEEDED', Metric.id), else_=None))).label('analyzed_points')
            ).select_from(
                Metric
            ).outerjoin(
                TelemetryAnalysis, TelemetryAnalysis.metric_id == Metric.id
            ).where(
                and_(
                    Metric.id.in_(team_metrics_query),
                    Metric.type.in_(['histogram', 'ukm_event'])
                )
            )

            team_stats_result = await extractor_db.execute(team_stats_query)
            team_stats = team_stats_result.first()

            # Get SE members for this team
            se_members_query = select(
                func.group_concat(User.name, ', ').label('se_members')
            ).select_from(
                Team
            ).outerjoin(
                TeamMember, and_(Team.id == TeamMember.team_id, TeamMember.role == 'SE')
            ).outerjoin(
                User, TeamMember.user_id == User.id
            ).where(Team.id == team_id)

            se_members_result = await business_db.execute(se_members_query)
            se_members_row = se_members_result.first()

            total_points = team_stats.total_points or 0 if team_stats else 0
            analyzed_points = team_stats.analyzed_points or 0 if team_stats else 0
            analysis_progress = round((analyzed_points / total_points * 100), 1) if total_points > 0 else 0

            team_data.append({
                'teamId': team_id,
                'teamName': team_name,
                'totalPoints': total_points,
                'analyzedPoints': analyzed_points,
                'analysisProgress': analysis_progress,
                'seMembers': se_members_row.se_members if se_members_row else None
            })

        # Sort by team name (alphabetical)
        team_data.sort(key=lambda x: x['teamName'])

        return TeamStatsResponse(
            success=True,
            data=team_data,
            message="Team analysis statistics retrieved successfully"
        )
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve team analysis statistics: {str(e)}"
        )


# Pydantic models for analysis management
class AnalysisUpdateRequest(BaseModel):
    """Payload for setting a telemetry point's analysis state.

    Shared by the single-point (PUT /{telemetry_id}/analysis) and batch
    (PUT /batch-analysis) update endpoints.
    """
    # New status value, e.g. 'NEEDS_COLLECTION' or 'NOT_NEEDED'
    analysis_status: str
    # Optional free-form reviewer notes attached to the analysis
    analysis_notes: Optional[str] = None
    # Optional team to attribute this analysis to; omitted keeps/derives the existing one
    team_id: Optional[str] = None


class AnalysisUpdateResponse(BaseModel):
    """Result envelope returned by the analysis update endpoints."""
    # True when the update was persisted successfully
    success: bool
    # Human-readable description of the outcome
    message: str

@router.put("/{telemetry_id}/analysis", response_model=AnalysisUpdateResponse)
async def update_telemetry_analysis(
    telemetry_id: int,
    request: AnalysisUpdateRequest,
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Update analysis status for a telemetry point"""
    logger.info(f"Updating analysis status for telemetry_id: {telemetry_id} to {request.analysis_status}")

    try:
        # Verify telemetry point exists
        metric_query = select(Metric.id).where(Metric.id == telemetry_id)
        metric_result = await extractor_db.execute(metric_query)
        if not metric_result.first():
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Telemetry point not found"
            )

        # Check if analysis record exists and get team info in a single query using JOIN
        existing_query = select(
            TelemetryAnalysis.id,
            TelemetryAnalysis.team_id,
            TelemetryAnalysis.assigned_team_id,
            Team.name.label('team_name')
        ).select_from(
            TelemetryAnalysis.__table__.outerjoin(Team, or_(
                TelemetryAnalysis.team_id == Team.id,
                TelemetryAnalysis.assigned_team_id == Team.id
            ))
        ).where(TelemetryAnalysis.metric_id == telemetry_id)

        existing_result = await business_db.execute(existing_query)
        existing_row = existing_result.first()

        # Initialize tracking variables
        previous_status = "UNKNOWN"
        previous_team_name = None
        previous_team_id = None
        existing_analysis_id = None

        if existing_row:
            existing_analysis_id = existing_row.id
            previous_team_id = existing_row.team_id or existing_row.assigned_team_id
            previous_team_name = existing_row.team_name
            logger.info(f"Found existing analysis record: {existing_analysis_id}, team: {previous_team_name}")

            # Update existing record
            try:
                if existing_analysis_id:
                    update_stmt = (
                        update(TelemetryAnalysis)
                        .where(TelemetryAnalysis.id == existing_analysis_id)
                        .values(
                            analysis_status=request.analysis_status,
                            analysis_notes=request.analysis_notes,
                            analyzed_by=current_user["email"],
                            analyzed_at=datetime.utcnow(),
                            updated_at=datetime.utcnow(),
                            team_id=request.team_id if request.team_id else previous_team_id
                        )
                    )
                    await business_db.execute(update_stmt)
                else:
                    raise HTTPException(
                        status_code=500,
                        detail="No existing analysis ID available for update"
                    )

            except Exception as update_error:
                await business_db.rollback()
                raise HTTPException(
                    status_code=500,
                    detail=f"Failed to update analysis status: {update_error}"
                )
        else:
            # Create new analysis record
            try:
                new_analysis = TelemetryAnalysis(
                    id=str(uuid.uuid4()),
                    metric_id=telemetry_id,
                    analysis_status=request.analysis_status,
                    analysis_notes=request.analysis_notes,
                    analyzed_by=current_user["email"],
                    analyzed_at=datetime.utcnow(),
                    team_id=request.team_id
                )
                business_db.add(new_analysis)
            except Exception as create_error:
                raise HTTPException(
                    status_code=500,
                    detail=f"Failed to create analysis record: {create_error}"
                )

                    # Get new team name for activity log (only if different from previous team)
        new_team_name = None
        new_team_id = request.team_id
        if new_team_id and new_team_id != previous_team_id:
            team_query = select(Team.name).where(Team.id == new_team_id)
            team_result = await business_db.execute(team_query)
            team_row = team_result.first()
            if team_row:
                new_team_name = team_row.name

        # Create activity log entry (only if there are actual changes)
        if new_team_name or request.analysis_status != previous_status:
            activity_details = {
                "previous_status": previous_status,
                "new_status": request.analysis_status,
                "previous_team": previous_team_name,
                "new_team": new_team_name,
                "analysis_reason": request.analysis_notes,
                "telemetry_id": telemetry_id
            }

            activity_log = ActivityLog(
                id=str(uuid.uuid4()),
                user_id=current_user["user_id"],
                team_id=request.team_id,
                action="UPDATE_ANALYSIS",
                resource_type="TELEMETRY_ANALYSIS",
                resource_id=str(telemetry_id),
                details=json.dumps(activity_details, default=str)
            )
            business_db.add(activity_log)

        # Synchronize with metrics table in extractor database in the same transaction
        try:
            # Update the metrics table to keep data synchronized
            metrics_update_stmt = (
                update(Metric)
                .where(Metric.id == telemetry_id)
                .values(
                    analysis_status=request.analysis_status,
                    analysis_notes=request.analysis_notes,
                    analyzed_by=current_user["email"],
                    analyzed_at=datetime.utcnow(),
                    team_id=request.team_id
                )
            )

            await extractor_db.execute(metrics_update_stmt)

        except Exception as sync_error:
            logger.error(f"Failed to synchronize metrics table: {sync_error}")
            # Don't fail the whole operation, but log the error for monitoring

        # Single commit for both databases after all operations
        try:
            await business_db.commit()
            await extractor_db.commit()
        except Exception as commit_error:
            await business_db.rollback()
            await extractor_db.rollback()
            raise HTTPException(
                status_code=500,
                detail=f"Failed to commit changes: {commit_error}"
            )

        return AnalysisUpdateResponse(
            success=True,
            message=f"Analysis status updated to {request.analysis_status}"
        )

    except HTTPException:
        # Rollback both databases on HTTP exceptions
        try:
            await business_db.rollback()
        except Exception:
            pass
        try:
            await extractor_db.rollback()
        except Exception:
            pass
        raise
    except Exception as e:
        # Rollback both databases on general exceptions
        try:
            await business_db.rollback()
        except Exception:
            pass
        try:
            await extractor_db.rollback()
        except Exception:
            pass
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to update analysis status: {str(e)}"
        )


@router.put("/batch-analysis", response_model=AnalysisUpdateResponse)
async def batch_update_analysis(
    telemetry_ids: List[int],
    request: AnalysisUpdateRequest,
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Update analysis status for multiple telemetry points - OPTIMIZED VERSION"""
    try:
        # Verify all telemetry points exist in a single query
        metric_query = select(Metric.id).where(Metric.id.in_(telemetry_ids))
        metric_result = await extractor_db.execute(metric_query)
        existing_ids = {row.id for row in metric_result.fetchall()}

        if len(existing_ids) != len(telemetry_ids):
            missing_ids = set(telemetry_ids) - existing_ids
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Telemetry points not found: {missing_ids}"
            )

        # Get all existing analysis records in a single query instead of O(n) queries
        existing_analysis_query = select(
            TelemetryAnalysis.id,
            TelemetryAnalysis.metric_id
        ).where(TelemetryAnalysis.metric_id.in_(telemetry_ids))

        existing_analysis_result = await business_db.execute(existing_analysis_query)
        existing_records = {
            row.metric_id: row.id for row in existing_analysis_result.fetchall()
        }

        # Separate updates vs inserts for batch operations
        ids_to_update = []
        ids_to_insert = []

        for telemetry_id in telemetry_ids:
            if telemetry_id in existing_records:
                ids_to_update.append(telemetry_id)
            else:
                ids_to_insert.append(telemetry_id)

                            # Perform batch updates - a single UPDATE for all existing records
        if ids_to_update:
            update_stmt = (
                update(TelemetryAnalysis)
                .where(TelemetryAnalysis.id.in_([existing_records[tid] for tid in ids_to_update]))
                .values(
                    analysis_status=request.analysis_status,
                    analysis_notes=request.analysis_notes,
                    analyzed_by=current_user["email"],
                    analyzed_at=datetime.utcnow(),
                    updated_at=datetime.utcnow(),
                    team_id=request.team_id
                )
            )
            await business_db.execute(update_stmt)
            updated_count += len(ids_to_update)

        # Perform batch inserts - create all new records at once
        if ids_to_insert:
            new_records = [
                {
                    'id': str(uuid.uuid4()),
                    'metric_id': telemetry_id,
                    'analysis_status': request.analysis_status,
                    'analysis_notes': request.analysis_notes,
                    'analyzed_by': current_user["email"],
                    'analyzed_at': datetime.utcnow(),
                    'team_id': request.team_id
                }
                for telemetry_id in ids_to_insert
            ]

            # Use bulk_insert_mappings for efficient batch insert
            await business_db.execute(
                TelemetryAnalysis.__table__.insert(),
                new_records
            )
            updated_count += len(ids_to_insert)

        # Batch synchronize with metrics table - single UPDATE for all telemetry IDs
        metrics_update_stmt = (
            update(Metric)
            .where(Metric.id.in_(telemetry_ids))
            .values(
                analysis_status=request.analysis_status,
                analysis_notes=request.analysis_notes,
                analyzed_by=current_user["email"],
                analyzed_at=datetime.utcnow(),
                team_id=request.team_id
            )
        )

        await extractor_db.execute(metrics_update_stmt)

        # Single commit for both databases after all operations
        await business_db.commit()
        await extractor_db.commit()

        return AnalysisUpdateResponse(
            success=True,
            message=f"Updated analysis status for {updated_count} telemetry points to {request.analysis_status}"
        )

    except HTTPException:
        raise
    except Exception as e:
        await business_db.rollback()
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to update batch analysis status: {str(e)}"
        )


# Pydantic models for analysis history
class AnalysisHistoryResponse(BaseModel):
    """Envelope returned by the analysis-history endpoint."""
    # True when the history lookup succeeded
    success: bool
    # History entries as plain dicts (shaped like AnalysisHistoryItem)
    data: List[dict]
    # Human-readable description of the outcome
    message: str


class AnalysisHistoryItem(BaseModel):
    """A single entry in a telemetry point's analysis history."""
    # Identifier of the underlying history/analysis record
    id: str
    # When the change happened (string-serialized)
    timestamp: str
    # Display name and email of the user who made the change
    user_name: str
    user_email: str
    # What was done, e.g. "UPDATE_ANALYSIS"
    action: str
    # Status transition (either side may be absent)
    previous_status: Optional[str] = None
    new_status: Optional[str] = None
    # Team transition (either side may be absent)
    previous_team: Optional[str] = None
    new_team: Optional[str] = None
    # Reviewer-provided reason/notes for the change
    analysis_reason: Optional[str] = None
    # Raw details payload, if any
    details: Optional[dict] = None


@router.get("/{telemetry_id}/analysis-history", response_model=AnalysisHistoryResponse)
async def get_analysis_history(
    telemetry_id: int,
    page: int = Query(1, ge=1),
    page_size: int = Query(20, ge=1),
    current_user: dict = Depends(get_current_user),
    business_db: AsyncSession = Depends(get_business_db),
    extractor_db: AsyncSession = Depends(get_extractor_db)
):
    """Get analysis history for a specific telemetry point.

    Merges two sources from the business DB into one timeline:
      1. ``TelemetryAnalysis`` rows for the metric (current analysis state).
      2. ``ActivityLog`` rows with resource_type ``TELEMETRY_ANALYSIS``.

    The merged list is sorted newest-first and paginated in memory.

    Args:
        telemetry_id: Metric id in the extractor database.
        page: 1-based page number (validated ``>= 1`` — a value of 0 would
            otherwise produce a negative slice and silently wrong results).
        page_size: Number of entries per page (validated ``>= 1``).

    Raises:
        HTTPException 404: telemetry point not found in the extractor DB.
        HTTPException 500: any unexpected failure while assembling history.
    """
    try:
        # Verify the telemetry point exists before querying history.
        metric_result = await extractor_db.execute(
            select(Metric.id, Metric.name).where(Metric.id == telemetry_id)
        )
        if metric_result.first() is None:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Telemetry point not found"
            )

        history_items = []

        # --- Source 1: TelemetryAnalysis rows --------------------------------
        # NOTE(review): this join compares ``analyzed_by`` with ``User.id``,
        # but the batch-update path stores ``current_user["email"]`` in
        # ``analyzed_by`` — verify which value the column actually holds;
        # if it is the email, the join should use ``User.email``.
        analysis_query = select(
            TelemetryAnalysis.id,
            TelemetryAnalysis.analysis_status,
            TelemetryAnalysis.analysis_notes,
            TelemetryAnalysis.analyzed_by,
            TelemetryAnalysis.analyzed_at,
            TelemetryAnalysis.team_id,
            TelemetryAnalysis.assigned_team_id,
            TelemetryAnalysis.created_at,
            TelemetryAnalysis.updated_at,
            User.name,
            User.email
        ).outerjoin(
            User, TelemetryAnalysis.analyzed_by == User.id
        ).where(
            TelemetryAnalysis.metric_id == telemetry_id
        ).order_by(
            TelemetryAnalysis.created_at.desc(),
            TelemetryAnalysis.updated_at.desc()
        )

        analysis_rows = (await business_db.execute(analysis_query)).fetchall()

        # Resolve all referenced team names in one query instead of up to
        # two queries per row (avoids an N+1 pattern).
        team_ids = {
            tid
            for row in analysis_rows
            for tid in (row.team_id, row.assigned_team_id)
            if tid
        }
        team_names = {}
        if team_ids:
            team_result = await business_db.execute(
                select(Team.id, Team.name).where(Team.id.in_(team_ids))
            )
            team_names = {tid: name for tid, name in team_result.fetchall()}

        for row in analysis_rows:
            # Prefer the most recently touched timestamp for display/sorting.
            ts = row.updated_at or row.analyzed_at or row.created_at
            history_items.append({
                "id": str(row.id),
                "timestamp": ts.isoformat() if ts else None,
                "user_name": row.name or "System",
                "user_email": row.email or "",
                "action": "ANALYSIS_UPDATE",
                "previous_status": None,  # not tracked on the analysis row itself
                "new_status": row.analysis_status,
                "previous_team": None,    # not tracked on the analysis row itself
                "new_team": team_names.get(row.assigned_team_id) or team_names.get(row.team_id),
                "analysis_reason": row.analysis_notes,
                "details": {
                    "analyzed_at": row.analyzed_at.isoformat() if row.analyzed_at else None,
                    "team_id": row.team_id,
                    "assigned_team_id": row.assigned_team_id
                }
            })

        # --- Source 2: ActivityLog rows --------------------------------------
        activity_query = select(
            ActivityLog.id,
            ActivityLog.action,
            ActivityLog.details,
            ActivityLog.created_at,
            User.name,
            User.email
        ).outerjoin(
            User, ActivityLog.user_id == User.id
        ).where(
            ActivityLog.resource_type == "TELEMETRY_ANALYSIS",
            ActivityLog.resource_id == str(telemetry_id)
        ).order_by(
            ActivityLog.created_at.desc()
        )

        activity_result = await business_db.execute(activity_query)

        for row in activity_result.fetchall():
            base = {
                "id": str(row.id),
                "timestamp": row.created_at.isoformat(),
                "user_name": row.name or "System",
                "user_email": row.email or "",
                "action": row.action,
            }
            try:
                details = json.loads(row.details) if row.details else {}
                history_items.append({
                    **base,
                    "previous_status": details.get("previous_status"),
                    "new_status": details.get("new_status"),
                    "previous_team": details.get("previous_team"),
                    "new_team": details.get("new_team"),
                    "analysis_reason": details.get("analysis_reason"),
                    "details": details
                })
            except json.JSONDecodeError:
                # Details column is not valid JSON; keep the raw payload so
                # the entry is still visible in the history.
                history_items.append({
                    **base,
                    "previous_status": None,
                    "new_status": None,
                    "previous_team": None,
                    "new_team": None,
                    "analysis_reason": None,
                    "details": {"raw_details": row.details}
                })

        # Newest first; entries without a timestamp sort last.
        history_items.sort(key=lambda item: item["timestamp"] or "", reverse=True)

        # In-memory pagination over the merged timeline.
        start_index = (page - 1) * page_size
        paginated_items = history_items[start_index:start_index + page_size]

        return AnalysisHistoryResponse(
            success=True,
            data=paginated_items,
            message=f"Retrieved {len(paginated_items)} history records"
        )

    except HTTPException:
        raise
    except Exception as e:
        # Log server-side before mapping to a generic 500 so the root cause
        # is not lost.
        logger.exception("Failed to retrieve analysis history for telemetry %s", telemetry_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Failed to retrieve analysis history: {str(e)}"
        )