"""
Flags HyperCache for feature-flags service.

This module provides a HyperCache that stores feature flags for the feature-flags service.
Unlike the local_evaluation.py cache which provides rich data for SDKs (including cohorts
and group type mappings), this cache provides just the raw flag data.

The cache is automatically invalidated when:
- FeatureFlag models are created, updated, or deleted
- Team models are created or deleted (to ensure flag caches are cleaned up)
- Hourly refresh job detects expiring entries (TTL < 24h)

Cache Key Pattern:
- Uses team_id as the key
- Stored in both Redis and S3 via HyperCache

Configuration:
- Redis TTL: 7 days (configurable via FLAGS_CACHE_TTL env var)
- Miss TTL: 1 day (configurable via FLAGS_CACHE_MISS_TTL env var)

Manual operations:
    from posthog.models.feature_flag.flags_cache import clear_flags_cache
    clear_flags_cache(team_id)
"""

from collections import defaultdict
from typing import Any

from django.conf import settings
from django.contrib.postgres.aggregates import ArrayAgg
from django.db import transaction
from django.db.models import Q
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver

import structlog

from posthog.caching.flags_redis_cache import FLAGS_DEDICATED_CACHE_ALIAS
from posthog.metrics import TOMBSTONE_COUNTER
from posthog.models.feature_flag import FeatureFlag
from posthog.models.feature_flag.feature_flag import get_feature_flags, serialize_feature_flags
from posthog.models.team import Team
from posthog.redis import get_client
from posthog.storage.cache_expiry_manager import (
    cleanup_stale_expiry_tracking as cleanup_generic,
    get_teams_with_expiring_caches,
    refresh_expiring_caches,
    track_cache_expiry,
)
from posthog.storage.hypercache import HyperCache
from posthog.storage.hypercache_manager import (
    HyperCacheManagementConfig,
    get_cache_stats as get_cache_stats_generic,
)

logger = structlog.get_logger(__name__)

# Sorted set key for tracking cache expirations.
# Members are team IDs; scores are the unix timestamps when each team's
# cache entry expires (written via track_cache_expiry).
FLAGS_CACHE_EXPIRY_SORTED_SET = "flags_cache_expiry"


def _get_feature_flags_for_service(team: Team) -> dict[str, Any]:
    """
    Build the cache payload of feature flags for the feature-flags service.

    Loads every active, non-deleted flag for the team, serializes it, and wraps
    the result in a dict so HyperCache can store it. The serialized flags live
    under the "flags" key.

    Returns:
        dict: {"flags": [...]} where flags is a list of flag dictionaries
    """
    serialized = serialize_feature_flags(get_feature_flags(team=team))

    logger.info(
        "Loaded feature flags for service cache",
        team_id=team.id,
        project_id=team.project_id,
        flag_count=len(serialized),
    )

    # HyperCache expects a dict payload, not a bare list
    return {"flags": serialized}


def _get_feature_flags_for_teams_batch(teams: list[Team]) -> dict[int, dict[str, Any]]:
    """
    Batch load feature flags for multiple teams with a single query.

    Avoids N+1 queries: all flags for all requested teams are fetched at once
    (with evaluation tag names aggregated in the same query), then grouped by
    team and serialized per team.

    Args:
        teams: List of Team objects to load flags for

    Returns:
        Dict mapping team_id to {"flags": [...]} for each team
    """
    if not teams:
        return {}

    queryset = (
        FeatureFlag.objects.filter(team__in=teams, active=True, deleted=False)
        .select_related("team")
        .annotate(
            evaluation_tag_names_agg=ArrayAgg(
                "evaluation_tags__tag__name",
                filter=Q(evaluation_tags__isnull=False),
                distinct=True,
            )
        )
    )

    # Group flags by team, copying the aggregated tag names onto each instance
    # so serialize_feature_flags can pick them up without extra queries.
    grouped: dict[int, list[FeatureFlag]] = defaultdict(list)
    for flag in queryset:
        flag._evaluation_tag_names = flag.evaluation_tag_names_agg or []
        grouped[flag.team_id].append(flag)

    payload_by_team_id: dict[int, dict[str, Any]] = {}
    for team in teams:
        serialized = serialize_feature_flags(grouped.get(team.id, []))

        logger.info(
            "Loaded feature flags for service cache (batch)",
            team_id=team.id,
            project_id=team.project_id,
            flag_count=len(serialized),
        )

        payload_by_team_id[team.id] = {"flags": serialized}

    return payload_by_team_id


def _track_cache_expiry(team: Team | int, ttl_seconds: int) -> None:
    """
    Record this team's cache expiry time in the Redis sorted set.

    Delegates to the generic tracker, pointing it at the dedicated flags Redis.

    Args:
        team: Team object or team ID
        ttl_seconds: TTL in seconds from now
    """
    track_cache_expiry(
        FLAGS_CACHE_EXPIRY_SORTED_SET,
        team,
        ttl_seconds,
        redis_url=settings.FLAGS_REDIS_URL,
    )


# HyperCache instance for feature-flags service
# Use dedicated flags cache alias if available, otherwise defaults to default cache
flags_hypercache = HyperCache(
    namespace="feature_flags",
    value="flags.json",
    # Single-team loader: resolve the Team from the cache key, then build its payload
    load_fn=lambda key: _get_feature_flags_for_service(HyperCache.team_from_key(key)),
    cache_ttl=settings.FLAGS_CACHE_TTL,
    cache_miss_ttl=settings.FLAGS_CACHE_MISS_TTL,
    cache_alias=FLAGS_DEDICATED_CACHE_ALIAS if FLAGS_DEDICATED_CACHE_ALIAS in settings.CACHES else None,
    # Batch loader avoids N+1 queries when warming many teams at once
    batch_load_fn=_get_feature_flags_for_teams_batch,
)


def get_flags_from_cache(team: Team) -> list[dict[str, Any]] | None:
    """
    Read the cached feature flags for a team.

    Only operates when FLAGS_REDIS_URL is configured to avoid reading from/writing to shared cache.

    Args:
        team: The team to get flags for

    Returns:
        list: Flag dictionaries (empty list if team has zero flags)
        None: Cache miss or FLAGS_REDIS_URL not configured
    """
    if not settings.FLAGS_REDIS_URL:
        return None

    cached = flags_hypercache.get_from_cache(team)
    # Unwrap the {"flags": [...]} payload; None means a cache miss
    return None if cached is None else cached.get("flags", [])


def update_flags_cache(team: Team | int, ttl: int | None = None) -> bool:
    """
    Update the flags cache for a team.

    Explicitly refreshes both Redis and S3 with the latest flag data, then
    records the new expiry in the tracking sorted set on success.
    Only operates when FLAGS_REDIS_URL is configured to avoid writing to shared cache.

    Note: Update duration is tracked by CACHE_SYNC_DURATION_HISTOGRAM in hypercache.py

    Args:
        team: Team object or team ID
        ttl: Optional custom TTL in seconds (defaults to FLAGS_CACHE_TTL)

    Returns:
        True if cache update succeeded, False otherwise
    """
    if not settings.FLAGS_REDIS_URL:
        return False

    updated = flags_hypercache.update_cache(team, ttl=ttl)
    team_id = team.id if isinstance(team, Team) else team

    if updated:
        # Record the new expiry so the hourly refresh job can find this entry
        effective_ttl = settings.FLAGS_CACHE_TTL if ttl is None else ttl
        _track_cache_expiry(team, effective_ttl)
    else:
        logger.warning("Failed to update flags cache", team_id=team_id)

    return updated


# Initialize hypercache management config after update_flags_cache is defined
# (the config holds a reference to that function, so ordering matters).
FLAGS_HYPERCACHE_MANAGEMENT_CONFIG = HyperCacheManagementConfig(
    hypercache=flags_hypercache,
    update_fn=update_flags_cache,
    cache_name="flags",
)

# Derive cache expiry config from hypercache management config (eliminates duplication)
FLAGS_CACHE_EXPIRY_CONFIG = FLAGS_HYPERCACHE_MANAGEMENT_CONFIG.cache_expiry_config()


def clear_flags_cache(team: Team | int, kinds: list[str] | None = None) -> None:
    """
    Clear the flags cache for a team.

    Clears the HyperCache entry and removes the team from the expiry tracking
    sorted set. Only operates when FLAGS_REDIS_URL is configured to avoid
    writing to shared cache.

    Args:
        team: Team object or team ID
        kinds: Optional list of cache kinds to clear ("redis", "s3")
    """
    if not settings.FLAGS_REDIS_URL:
        return

    flags_hypercache.clear_cache(team, kinds=kinds)

    # Remove from expiry tracking sorted set. Must target the dedicated flags
    # Redis — track_cache_expiry() writes the sorted set there (it passes
    # redis_url=settings.FLAGS_REDIS_URL), so a default get_client() would
    # ZREM against the wrong instance and leave an orphaned entry.
    try:
        redis_client = get_client(settings.FLAGS_REDIS_URL)
        team_id = team.id if isinstance(team, Team) else team
        redis_client.zrem(FLAGS_CACHE_EXPIRY_SORTED_SET, str(team_id))
    except Exception as e:
        # Best-effort: a stale entry is eventually removed by
        # cleanup_stale_expiry_tracking(), so don't fail the cache clear.
        logger.warning("Failed to remove from expiry tracking", error=str(e), error_type=type(e).__name__)


def get_teams_with_expiring_flags_caches(ttl_threshold_hours: int = 24, limit: int = 5000) -> list[Team]:
    """
    Find teams whose flags caches expire within the given threshold.

    Performs a ZRANGEBYSCORE on the expiry tracking sorted set rather than
    scanning all Redis keys — O(log N + M) for M expiring teams instead of
    O(N) for a full SCAN.

    Args:
        ttl_threshold_hours: Refresh caches expiring within this many hours
        limit: Maximum number of teams to return (default 5000)

    Returns:
        List of Team objects whose caches need refresh (up to limit)
    """
    expiring_teams = get_teams_with_expiring_caches(FLAGS_CACHE_EXPIRY_CONFIG, ttl_threshold_hours, limit)
    return expiring_teams


def refresh_expiring_flags_caches(ttl_threshold_hours: int = 24, limit: int = 5000) -> tuple[int, int]:
    """
    Refresh flags caches that are about to expire, preventing cache misses.

    This is the main hourly job keeping caches fresh: it finds cache entries
    with TTL below the threshold (up to ``limit`` of them) and rewrites each
    with fresh data and a full TTL. If more teams are expiring than fit in one
    batch, subsequent hourly runs pick up the remainder.

    Note: Metrics are pushed to Pushgateway by refresh_expiring_caches() via push_hypercache_teams_processed_metrics()

    Args:
        ttl_threshold_hours: Refresh caches expiring within this many hours
        limit: Maximum number of teams to refresh per run (default 5000).
               5000 balances memory (bounded number of Team objects loaded),
               throughput (hourly runs cover 120K teams/day at 5000 * 24,
               against ~200K teams total), and responsiveness (each run
               finishes quickly enough not to block other operations).

    Returns:
        Tuple of (successful_refreshes, failed_refreshes)
    """
    # Metrics are tracked in cache_expiry_manager.py using consolidated counters
    return refresh_expiring_caches(FLAGS_CACHE_EXPIRY_CONFIG, ttl_threshold_hours, limit)


def cleanup_stale_expiry_tracking() -> int:
    """
    Remove orphaned entries from the expiry tracking sorted set.

    Drops entries whose teams no longer exist in the database. Intended to run
    periodically (e.g. daily) so the sorted set does not grow without bound.

    Returns:
        Number of stale entries removed
    """
    removed_count = cleanup_generic(FLAGS_CACHE_EXPIRY_CONFIG)

    if removed_count > 0:
        # Surface removals as tombstone metrics for monitoring
        TOMBSTONE_COUNTER.labels(
            namespace="flags", operation="stale_expiry_tracking", component="flags_cache"
        ).inc(removed_count)

    return removed_count


def get_cache_stats() -> dict[str, Any]:
    """
    Report statistics about the flags cache.

    Delegates to the generic stats helper with this module's management config.

    Returns:
        Dictionary with cache statistics including size information
    """
    stats = get_cache_stats_generic(FLAGS_HYPERCACHE_MANAGEMENT_CONFIG)
    return stats


# Signal handlers for automatic cache invalidation


@receiver(post_save, sender=FeatureFlag)
@receiver(post_delete, sender=FeatureFlag)
def feature_flag_changed_flags_cache(sender, instance: "FeatureFlag", **kwargs):
    """
    Invalidate flags cache when a feature flag is created, updated, or deleted.

    This ensures the feature-flags service always has fresh flag data after any flag changes.
    Only operates when FLAGS_REDIS_URL is configured.
    """
    if not settings.FLAGS_REDIS_URL:
        return

    from posthog.tasks.feature_flags import update_team_service_flags_cache

    def _enqueue_refresh():
        # Metric tracking happens in the task itself to capture actual success/failure result
        update_team_service_flags_cache.delay(instance.team_id)

    # Defer task execution until after the transaction commits to avoid race conditions
    transaction.on_commit(_enqueue_refresh)


@receiver(post_save, sender=Team)
def team_created_flags_cache(sender, instance: "Team", created: bool, **kwargs):
    """
    Warm flags cache when a team is created.

    This ensures the cache is immediately available for new teams.
    Only operates when FLAGS_REDIS_URL is configured.
    """
    if not created:
        return
    if not settings.FLAGS_REDIS_URL:
        return

    from posthog.tasks.feature_flags import update_team_service_flags_cache

    def _enqueue_warmup():
        # Metric tracking happens in the task itself to capture actual success/failure result
        update_team_service_flags_cache.delay(instance.id)

    # Defer task execution until after the transaction commits
    transaction.on_commit(_enqueue_warmup)


@receiver(post_delete, sender=Team)
def team_deleted_flags_cache(sender, instance: "Team", **kwargs):
    """
    Clear flags cache when a team is deleted.

    This ensures we don't have stale cache entries for deleted teams.
    Only operates when FLAGS_REDIS_URL is configured.
    """
    if not settings.FLAGS_REDIS_URL:
        return

    # For unit tests, only clear Redis to avoid S3 timestamp issues with frozen time
    clear_flags_cache(instance, kinds=["redis"] if settings.TEST else None)
