"""
Team metadata HyperCache - Full team object caching using existing HyperCache infrastructure.

This module provides dedicated caching of complete Team objects (39 fields) using the
existing HyperCache system which handles Redis + S3 backup automatically.

Memory Usage Estimation:
------------------------
Cache size varies significantly based on your team configurations. Factors include:
- Number of configured features (recording settings, survey configs, etc.)
- Length of organization and team names
- Number of populated optional fields
- Complexity of JSON configuration objects

Typical ranges (based on preliminary analysis):
- Per team: 10-30 KB compressed in Redis
- Compression ratio: 2-4:1 from raw JSON

To get accurate estimates for your data, run:
    python manage.py analyze_team_cache_sizes

The tool will sample the cache and provide percentile-based memory projections.

Configuration:
-------------------
- Redis TTL: 7 days (configurable via TEAM_METADATA_CACHE_TTL env var)
- Miss TTL: 1 day (configurable via TEAM_METADATA_CACHE_MISS_TTL env var)

Cache Invalidation:
-------------------
Caches are invalidated automatically when:
- Team is saved (via Django signal → Celery task)
- Team is deleted (via Django signal → immediate clear)
- Hourly refresh job detects expiring entries (TTL < 24h)

Manual invalidation:
    from posthog.storage.team_metadata_cache import clear_team_metadata_cache
    clear_team_metadata_cache(team_id)

Note: Redis adds ~100 bytes overhead per key. S3 storage uses similar compression.
"""

import os
import time
from typing import Any

from django.conf import settings
from django.db import transaction

import structlog

from posthog.caching.flags_redis_cache import FLAGS_DEDICATED_CACHE_ALIAS
from posthog.metrics import TOMBSTONE_COUNTER
from posthog.models.team.team import Team
from posthog.redis import get_client
from posthog.storage.cache_expiry_manager import (
    cleanup_stale_expiry_tracking as cleanup_generic,
    get_teams_with_expiring_caches as get_teams_generic,
    refresh_expiring_caches as refresh_generic,
)
from posthog.storage.hypercache import HyperCache, HyperCacheStoreMissing, KeyType
from posthog.storage.hypercache_manager import (
    HyperCacheManagementConfig,
    get_cache_stats as get_cache_stats_generic,
)

logger = structlog.get_logger(__name__)


# TTL for populated cache entries (seconds). Defaults to 7 days; override via env var.
TEAM_METADATA_CACHE_TTL = int(os.environ.get("TEAM_METADATA_CACHE_TTL", str(60 * 60 * 24 * 7)))
# Shorter TTL for "miss" entries so deleted/unknown teams are re-checked sooner. Defaults to 1 day.
TEAM_METADATA_CACHE_MISS_TTL = int(os.environ.get("TEAM_METADATA_CACHE_MISS_TTL", str(60 * 60 * 24)))

# Sorted set key for tracking cache expirations.
# Members are team API tokens; scores are unix expiry timestamps (see _track_cache_expiry).
TEAM_CACHE_EXPIRY_SORTED_SET = "team_metadata_cache_expiry"

# Team model fields copied verbatim into the cached metadata payload
# (see _serialize_team_to_metadata, which also appends organization/project names).
# NOTE: Includes secret tokens (api_token, secret_api_token, secret_api_token_backup)
# for flags service consumption. These are stored in dedicated redis + potentially S3.
# This is acceptable for our threat model where flags service needs auth tokens to validate requests.
TEAM_METADATA_FIELDS = [
    "id",
    "project_id",
    "organization_id",
    "uuid",
    "name",
    "api_token",
    "secret_api_token",
    "secret_api_token_backup",
    "app_urls",
    "slack_incoming_webhook",
    "created_at",
    "updated_at",
    "anonymize_ips",
    "completed_snippet_onboarding",
    "has_completed_onboarding_for",
    "onboarding_tasks",
    "ingested_event",
    "person_processing_opt_out",
    "extra_settings",
    "session_recording_opt_in",
    "session_recording_sample_rate",
    "session_recording_minimum_duration_milliseconds",
    "session_recording_linked_flag",
    "session_recording_network_payload_capture_config",
    "session_recording_masking_config",
    "session_recording_url_trigger_config",
    "session_recording_url_blocklist_config",
    "session_recording_event_trigger_config",
    "session_recording_trigger_match_type_config",
    "session_replay_config",
    "session_recording_retention_period",
    "survey_config",
    "surveys_opt_in",
    "capture_console_log_opt_in",
    "capture_performance_opt_in",
    "capture_dead_clicks",
    "autocapture_opt_out",
    "autocapture_web_vitals_opt_in",
    "autocapture_web_vitals_allowed_metrics",
]


# ===================================================================
# Private helpers
# ===================================================================


def _serialize_team_field(field: str, value: Any) -> Any:
    """
    Convert a team field value to cache-serializable format.

    Args:
        field: Field name from TEAM_METADATA_FIELDS
        value: Raw field value from Team model

    Returns:
        Serialized value suitable for JSON encoding
    """
    if field in ["created_at", "updated_at"]:
        return value.isoformat() if value else None
    elif field == "uuid":
        return str(value) if value else None
    elif field == "organization_id":
        return str(value) if value else None
    elif field == "session_recording_sample_rate":
        return float(value) if value is not None else None
    return value


def _track_cache_expiry(team: Team | str | int, ttl_seconds: int) -> None:
    """
    Record a team's upcoming cache expiration in the expiry sorted set.

    The sorted set maps api_token -> unix expiry timestamp, letting the
    hourly refresh job find soon-to-expire entries with a range query
    instead of scanning every cache key. Tracking is best-effort: any
    failure is logged and swallowed, never propagated.

    Args:
        team: Team object, API token string, or team ID
        ttl_seconds: TTL in seconds from now
    """
    try:
        redis_client = get_client(settings.FLAGS_REDIS_URL)

        if isinstance(team, Team):
            token = team.api_token
        elif isinstance(team, str):
            token = team
        else:
            # Only a numeric team ID is available; resolving it to a token
            # would cost a DB query and this path is rare, so skip tracking.
            return

        expires_at = time.time() + ttl_seconds
        redis_client.zadd(TEAM_CACHE_EXPIRY_SORTED_SET, {token: expires_at})
    except Exception as e:
        logger.warning("Failed to track cache expiry in sorted set", error=str(e), error_type=type(e).__name__)


def _serialize_team_to_metadata(team: Team) -> dict[str, Any]:
    """
    Build the full cacheable metadata dict for one team.

    Serializes every TEAM_METADATA_FIELDS entry via _serialize_team_field,
    then appends the organization and project display names.

    Args:
        team: Team object with organization and project already loaded

    Returns:
        Dictionary containing full team metadata
    """
    metadata: dict[str, Any] = {
        field: _serialize_team_field(field, getattr(team, field, None)) for field in TEAM_METADATA_FIELDS
    }

    organization = team.organization
    project = team.project
    metadata["organization_name"] = organization.name if organization else None
    metadata["project_name"] = project.name if project else None

    return metadata


def _batch_load_team_metadata(teams: list[Team]) -> dict[int, dict[str, Any]]:
    """
    Serialize several teams into metadata payloads in one pass.

    Used by warm_caches() to avoid N+1 queries when warming the cache.
    The warming framework already loads each Team with
    select_related("organization", "project"), so no database access
    happens here — this is pure serialization keyed by team id.

    Args:
        teams: List of Team objects with organization/project pre-loaded

    Returns:
        Dict mapping team_id -> metadata dict
    """
    metadata_by_id: dict[int, dict[str, Any]] = {}
    for team in teams:
        metadata_by_id[team.id] = _serialize_team_to_metadata(team)
    return metadata_by_id


def _load_team_metadata(team_key: KeyType) -> dict[str, Any] | HyperCacheStoreMissing:
    """
    Load full team metadata from the database for a cache fill.

    Resolves the cache key to a Team, re-fetches with select_related when
    the organization/project relations are not already cached on the
    instance (avoids lazy-load queries during serialization), and returns
    the serialized payload.

    Args:
        team_key: Team identifier (can be Team object, API token string, or team ID)

    Returns:
        Dictionary containing full team metadata, or HyperCacheStoreMissing if team not found
    """
    try:
        with transaction.atomic():
            team = HyperCache.team_from_key(team_key)

            # Re-fetch only when a relation would otherwise trigger a lazy query.
            relations_cached = isinstance(team, Team) and (
                Team.organization.is_cached(team) and Team.project.is_cached(team)
            )
            if isinstance(team, Team) and not relations_cached:
                team = Team.objects.select_related("organization", "project").get(id=team.id)

            return _serialize_team_to_metadata(team)

    except Team.DoesNotExist:
        # Missing team is an expected outcome: store a tombstone, not an error.
        logger.debug("Team not found for cache lookup")
        return HyperCacheStoreMissing()

    except Exception as e:
        # Unexpected failure: log with context, but still return a miss so
        # the caller's cache write path stays consistent.
        logger.exception(
            "Error loading team metadata",
            error_type=type(e).__name__,
            team_key_type=type(team_key).__name__,
        )
        return HyperCacheStoreMissing()


# ===================================================================
# Module initialization
# ===================================================================

# Module-level singleton: the HyperCache instance backing all public helpers
# below. Keys are token-based; values are the metadata dicts produced by
# _load_team_metadata (single) and _batch_load_team_metadata (warming).
team_metadata_hypercache = HyperCache(
    namespace="team_metadata",
    value="full_metadata.json",
    token_based=True,
    load_fn=_load_team_metadata,
    batch_load_fn=_batch_load_team_metadata,
    cache_ttl=TEAM_METADATA_CACHE_TTL,
    cache_miss_ttl=TEAM_METADATA_CACHE_MISS_TTL,
    # Use the dedicated flags Redis cache only when it is configured in
    # settings.CACHES; None presumably lets HyperCache pick its default alias.
    cache_alias=FLAGS_DEDICATED_CACHE_ALIAS if FLAGS_DEDICATED_CACHE_ALIAS in settings.CACHES else None,
)


# ===================================================================
# Public API - Core cache operations
# ===================================================================


def get_team_metadata(team: Team | str | int) -> dict[str, Any] | None:
    """
    Fetch the cached metadata dictionary for a team.

    Thin read-path wrapper around the module's HyperCache instance.

    Args:
        team: Team object, API token string, or team ID

    Returns:
        The cached metadata dict, or None when no entry is found
    """
    cached = team_metadata_hypercache.get_from_cache(team)
    return cached


def update_team_metadata_cache(team: Team | str | int, ttl: int | None = None) -> bool:
    """
    Rebuild and store the metadata cache entry for a single team.

    On success the new expiration is also recorded in the expiry sorted
    set so the hourly refresh job can renew the entry before it lapses.

    Note: Update duration is tracked by CACHE_SYNC_DURATION_HISTOGRAM in hypercache.py

    Args:
        team: Team object, API token string, or team ID
        ttl: Optional custom TTL in seconds (defaults to TEAM_METADATA_CACHE_TTL)

    Returns:
        True if cache update succeeded, False otherwise
    """
    success = team_metadata_hypercache.update_cache(team, ttl=ttl)

    if success:
        # Track expiration in sorted set for efficient queries
        effective_ttl = TEAM_METADATA_CACHE_TTL if ttl is None else ttl
        _track_cache_expiry(team, effective_ttl)
    else:
        # We can only name the team in logs when a Team object was passed in.
        team_id = team.id if isinstance(team, Team) else "unknown"
        logger.warning("Failed to update metadata cache", team_id=team_id)

    return success


# Initialize hypercache management config after update_team_metadata_cache is
# defined — the config holds a direct reference to that function as its
# update callback, so ordering here matters.
TEAM_HYPERCACHE_MANAGEMENT_CONFIG = HyperCacheManagementConfig(
    hypercache=team_metadata_hypercache,
    update_fn=update_team_metadata_cache,
    cache_name="team_metadata",
)

# Derive cache expiry config from hypercache management config (eliminates
# duplication); used by the batch refresh/cleanup helpers below.
TEAM_CACHE_EXPIRY_CONFIG = TEAM_HYPERCACHE_MANAGEMENT_CONFIG.cache_expiry_config()


def clear_team_metadata_cache(team: Team | str | int, kinds: list[str] | None = None) -> None:
    """
    Remove a team's metadata from the cache and from expiry tracking.

    The sorted-set cleanup is best-effort: failures are logged as warnings
    but never propagated, so clearing the cache itself always wins.

    Args:
        team: Team object, API token string, or team ID
        kinds: Optional list of cache types to clear (["redis", "s3"])
    """
    team_metadata_hypercache.clear_cache(team, kinds=kinds)

    # Drop the matching entry from the expiry-tracking sorted set.
    try:
        redis_client = get_client(settings.FLAGS_REDIS_URL)

        if isinstance(team, Team):
            token = team.api_token
        elif isinstance(team, str):
            token = team
        else:
            # Only a numeric team ID — resolving it to a token isn't worth a
            # DB hit for this rare path, so leave the sorted set alone.
            return

        redis_client.zrem(TEAM_CACHE_EXPIRY_SORTED_SET, token)
    except Exception as e:
        logger.warning("Failed to remove from expiry tracking", error=str(e), error_type=type(e).__name__)


# ===================================================================
# Batch refresh operations
# ===================================================================


def get_teams_with_expiring_caches(ttl_threshold_hours: int = 24, limit: int = 5000) -> list[Team]:
    """
    Find teams whose cache entries expire within the given window.

    Delegates to the generic expiry manager, which queries the tracking
    sorted set (ZRANGEBYSCORE — O(log N + M) for M expiring teams) rather
    than SCANning every Redis key.

    Args:
        ttl_threshold_hours: Consider caches expiring within this many hours
        limit: Maximum number of teams to return (default 5000)

    Returns:
        List of Team objects whose caches need refresh (up to limit)
    """
    expiring_teams = get_teams_generic(TEAM_CACHE_EXPIRY_CONFIG, ttl_threshold_hours, limit)
    return expiring_teams


def refresh_expiring_caches(ttl_threshold_hours: int = 24, limit: int = 5000) -> tuple[int, int]:
    """
    Refresh soon-to-expire cache entries before they lapse.

    This backs the hourly keep-fresh job: entries with TTL below the
    threshold (up to `limit` per run) are reloaded with fresh data and a
    full TTL. If more teams are expiring than fit in one batch, later runs
    pick up the remainder.

    Metrics are tracked in cache_expiry_manager.py using consolidated
    counters and pushed to Pushgateway via push_hypercache_teams_processed_metrics().

    Args:
        ttl_threshold_hours: Refresh caches expiring within this many hours
        limit: Maximum number of teams to refresh per run (default 5000)

    Returns:
        Tuple of (successful_refreshes, failed_refreshes)
    """
    result = refresh_generic(TEAM_CACHE_EXPIRY_CONFIG, ttl_threshold_hours, limit)
    return result


def cleanup_stale_expiry_tracking() -> int:
    """
    Drop expiry-tracking entries for teams that no longer exist.

    Delegates the scan/removal to the generic expiry manager, then records
    the removals on the tombstone counter. Intended to run periodically
    (e.g. daily) so the sorted set does not accumulate orphans.

    Returns:
        Number of stale entries removed
    """
    removed_count = cleanup_generic(TEAM_CACHE_EXPIRY_CONFIG)

    if removed_count > 0:
        TOMBSTONE_COUNTER.labels(
            namespace="team_metadata", operation="stale_expiry_tracking", component="team_metadata_cache"
        ).inc(removed_count)

    return removed_count


# ===================================================================
# Stats and observability
# ===================================================================


def get_cache_stats() -> dict[str, Any]:
    """
    Report statistics about the team metadata cache.

    Thin wrapper that hands this module's management config to the generic
    hypercache stats helper.

    Returns:
        Dictionary with cache statistics including size information
    """
    stats = get_cache_stats_generic(TEAM_HYPERCACHE_MANAGEMENT_CONFIG)
    return stats