import logging
from datetime import UTC, datetime, timedelta
from pathlib import Path
from typing import Any, Literal, TypedDict

from django.db.models import Value
from django.db.models.functions import StrIndex
from snuba_sdk import BooleanCondition, BooleanOp, Column, Condition, Entity, Function, Op, Query
from snuba_sdk import Request as SnubaRequest

from sentry import eventstore
from sentry.api.serializers import EventSerializer, serialize
from sentry.api.serializers.models.event import EventSerializerResponse
from sentry.integrations.models.repository_project_path_config import RepositoryProjectPathConfig
from sentry.integrations.source_code_management.commit_context import (
    OPEN_PR_MAX_FILES_CHANGED,
    OPEN_PR_MAX_LINES_CHANGED,
    OPEN_PR_MAX_RECENT_ISSUES,
    PullRequestFile,
)
from sentry.models.group import Group, GroupStatus
from sentry.models.project import Project
from sentry.models.repository import Repository
from sentry.seer.fetch_issues.more_parsing import (
    patch_parsers_more,
    simple_function_name_conditions,
)
from sentry.snuba.dataset import Dataset
from sentry.snuba.referrer import Referrer
from sentry.utils.snuba import raw_snql_query

logger = logging.getLogger(__name__)


MAX_NUM_ISSUES_PER_FILE_DEFAULT: int = 5
"""
The maximum number of related issues to return for one file.
"""

NUM_DAYS_AGO: int = 14
"""
The number of previous days from now to find issues and events.
This number is global so that fetching issues and events is consistent.
"""

STACKFRAME_COUNT: int = 20
"""
The number of stack frames to check for function name and file name matches.
"""


class PrFile(TypedDict):
    """A single changed file in a pull request, as reported by the SCM provider."""

    # Path of the file within the repository.
    filename: str
    # Unified-diff patch text for this file.
    patch: str
    # Change status reported by the provider; only "modified" files are processed downstream.
    status: Literal["added", "removed", "modified", "renamed", "copied", "changed", "unchanged"]
    # Total number of changed lines in this file.
    changes: int


def safe_for_fetching_issues(pr_files: list[PrFile]) -> list[PrFile]:
    """
    Keep only the modified PR files whose extension has a supported language parser.

    If the PR as a whole exceeds the allowed number of changed files or changed
    lines, it is considered unsafe to process and an empty list is returned.
    """
    kept: list[PrFile] = []
    num_files_changed = 0
    num_lines_changed = 0
    for pr_file in pr_files:
        extension = pr_file["filename"].rsplit(".", 1)[-1]
        is_eligible = pr_file["status"] == "modified" and extension in patch_parsers_more
        if not is_eligible:
            continue

        num_files_changed += 1
        num_lines_changed += pr_file["changes"]
        kept.append(pr_file)

        # Bail out entirely once the PR grows past either size limit.
        if num_files_changed > OPEN_PR_MAX_FILES_CHANGED:
            return []
        if num_lines_changed > OPEN_PR_MAX_LINES_CHANGED:
            return []

    return kept


def _get_issues_for_file(
    projects: list[Project],
    sentry_filenames: list[str],
    function_names: list[str],
    event_timestamp_start: datetime,
    event_timestamp_end: datetime,
    max_num_issues_per_file: int = MAX_NUM_ISSUES_PER_FILE_DEFAULT,
    run_id: int | None = None,
) -> list[dict[str, Any]]:
    """
    Fetch issues with their latest event if its stacktrace frames match the function names
    and file names.

    Args:
        projects: Projects to search. Must all belong to one organization; the first
            project's organization id is used for Snuba tenant routing.
        sentry_filenames: Candidate stack-frame filenames to match against.
        function_names: Candidate stack-frame function names to match against.
        event_timestamp_start: Inclusive lower bound on event timestamps.
        event_timestamp_end: Exclusive upper bound on event timestamps.
        max_num_issues_per_file: Cap on the number of matching rows returned.
        run_id: Optional id added to log context for traceability.

    Returns:
        Snuba result rows (dicts with ``group_id``, ``event_id``, ``title``), or an
        empty list when there are no projects or the query fails.
    """
    if not projects:
        return []

    # Fetch an initial, candidate set of groups.
    # NOTE: the slice is applied to the queryset (not the materialized list) so the
    # LIMIT happens in SQL and we never pull more than OPEN_PR_MAX_RECENT_ISSUES
    # ids into memory.
    group_ids: list[int] = list(
        Group.objects.filter(
            first_seen__gte=datetime.now(UTC) - timedelta(weeks=26),
            last_seen__gte=event_timestamp_start,
            status__in=[GroupStatus.UNRESOLVED, GroupStatus.RESOLVED],
            project__in=projects,
        )
        .order_by("-times_seen")
        .values_list("id", flat=True)[:OPEN_PR_MAX_RECENT_ISSUES]
    )
    project_ids = [project.id for project in projects]

    # Fetch the latest event for each group, along with some other event data we'll need for
    # filtering by function names and file names. argMax(col, timestamp) selects col from
    # the row with the greatest timestamp, i.e. the group's most recent event in the window.
    subquery = (
        Query(Entity("events"))
        .set_select(
            [
                Column("group_id"),
                Function(
                    "argMax",
                    [Column("event_id"), Column("timestamp")],
                    "event_id",
                ),
                Function(
                    "argMax",
                    [Column("title"), Column("timestamp")],
                    "title",
                ),
                Function(
                    "argMax",
                    [Column("exception_frames.filename"), Column("timestamp")],
                    "exception_frames.filename",
                ),
                Function(
                    "argMax",
                    [Column("exception_frames.function"), Column("timestamp")],
                    "exception_frames.function",
                ),
            ]
        )
        .set_groupby(
            [
                Column("group_id"),
            ]
        )
        .set_where(
            [
                Condition(Column("project_id"), Op.IN, project_ids),
                Condition(Column("group_id"), Op.IN, group_ids),
                Condition(Column("timestamp"), Op.GTE, event_timestamp_start),
                Condition(Column("timestamp"), Op.LT, event_timestamp_end),
            ]
        )
    )

    # Filter out groups whose event's stacktrace doesn't match the function names and file names.
    # A group matches if ANY of the checked frames has both a matching filename AND a matching
    # function name.
    query = (
        Query(subquery)
        .set_select(
            [
                Column("group_id"),
                Column("event_id"),
                Column("title"),
            ]
        )
        .set_where(
            [
                BooleanCondition(
                    BooleanOp.OR,
                    [
                        BooleanCondition(
                            BooleanOp.AND,
                            [
                                Condition(
                                    Function(
                                        "arrayElement",
                                        (Column("exception_frames.filename"), stackframe_idx),
                                    ),
                                    Op.IN,
                                    sentry_filenames,
                                ),
                                simple_function_name_conditions(function_names, stackframe_idx),
                            ],
                        )
                        # Negative indices address frames from the end of the array;
                        # the original comment calls these the "first n frames" —
                        # NOTE(review): confirm frame ordering matches that intent.
                        for stackframe_idx in range(-STACKFRAME_COUNT, 0)  # first n frames
                    ],
                ),
            ]
        )
        .set_limit(max_num_issues_per_file)
    )
    request = SnubaRequest(
        dataset=Dataset.Events.value,
        app_id="default",
        tenant_ids={"organization_id": projects[0].organization_id},
        query=query,
    )
    try:
        return raw_snql_query(request, referrer=Referrer.SEER_RPC.value)["data"]
    except Exception:
        # Best-effort: a failed Snuba query yields no issues rather than an error upstream.
        logger.exception(
            "Seer fetch issues given patches Snuba query error",
            extra={"query": request.to_dict()["query"], "run_id": run_id},
        )
        return []


def _add_event_details(
    projects: list[Project],
    issues_result_set: list[dict[str, Any]],
    event_timestamp_start: datetime | None,
    event_timestamp_end: datetime | None,
) -> list[dict[str, Any]]:
    """
    Bulk-fetch the events corresponding to the issues, and bulk-serialize them.
    """
    if not issues_result_set:
        return []

    event_ids = [row["event_id"] for row in issues_result_set]
    project_ids = [project.id for project in projects]
    events = eventstore.backend.get_events(
        filter=eventstore.Filter(
            start=event_timestamp_start,
            end=event_timestamp_end,
            event_ids=event_ids,
            project_ids=project_ids,
        ),
        referrer=Referrer.SEER_RPC.value,
        tenant_ids={"organization_id": projects[0].organization_id},
    )
    serialized_events: list[EventSerializerResponse] = serialize(
        events, serializer=EventSerializer()
    )

    issues: list[dict[str, Any]] = []
    for event_payload in serialized_events:
        group_id = event_payload["groupID"]
        # Events without a resolvable group can't be attached to an issue.
        if group_id is None:
            continue
        issues.append(
            {  # Structured like seer.automation.models.IssueDetails
                "id": int(group_id),
                "title": event_payload["title"],
                "events": [event_payload],
            }
        )
    return issues


def get_issues_with_event_details_for_file(
    projects: list[Project],
    sentry_filenames: list[str],
    function_names: list[str],
    max_num_issues_per_file: int = MAX_NUM_ISSUES_PER_FILE_DEFAULT,
    run_id: int | None = None,
) -> list[dict[str, Any]]:
    """
    Fetch issues whose latest event's stacktrace matches the given filenames and
    function names within the last NUM_DAYS_AGO days, each with its serialized
    latest event attached.

    Returns a list of dicts structured like seer.automation.models.IssueDetails.
    """
    # Capture "now" once so the issue query and the event fetch use the exact same
    # window (and the window is exactly NUM_DAYS_AGO days wide), matching the
    # consistency intent documented on NUM_DAYS_AGO.
    now = datetime.now(UTC)
    event_timestamp_start = now - timedelta(days=NUM_DAYS_AGO)
    event_timestamp_end = now
    issues_result_set = _get_issues_for_file(
        projects,
        sentry_filenames,
        function_names,
        event_timestamp_start,
        event_timestamp_end,
        max_num_issues_per_file=max_num_issues_per_file,
        run_id=run_id,
    )
    issues = _add_event_details(
        projects, issues_result_set, event_timestamp_start, event_timestamp_end
    )
    return issues


def _left_truncated_paths(filename: str, max_num_paths: int = 2) -> list[str]:
    """
    Example::

        paths = _left_truncated_paths("src/seer/automation/agent/client.py", 2)
        assert paths == [
            "seer/automation/agent/client.py",
            "automation/agent/client.py",
        ]
    """
    path = Path(filename)
    parts = list(path.parts)
    num_dirs = len(parts) - 1  # -1 for the filename
    num_paths = min(max_num_paths, num_dirs)

    result = []
    for _ in range(num_paths):
        parts.pop(0)
        result.append(str(Path(*parts)))
    return result


def _get_projects_and_filenames_from_source_file(
    org_id: int, repo_id: int, pr_filename: str, max_num_left_truncated_paths: int = 2
) -> tuple[set[Project], set[str]]:
    """
    Map a repository file path to the Sentry projects and candidate stacktrace
    filenames associated with it via the organization's code mappings.

    Returns:
        A tuple of (projects from matching code mappings, candidate filenames).
        The candidate set always contains ``pr_filename`` itself and its
        left-truncated variants, even when no code mapping matches.
    """
    # Fetch the code mappings in which the source_root is a substring at the start of pr_filename
    # (StrIndex returns the 1-based position of source_root within pr_filename, so
    # substring_match == 1 keeps only prefix matches).
    code_mappings = (
        RepositoryProjectPathConfig.objects.filter(
            organization_id=org_id,
            repository_id=repo_id,
        )
        .annotate(substring_match=StrIndex(Value(pr_filename), "source_root"))
        .filter(substring_match=1)
    )
    projects_set = {code_mapping.project for code_mapping in code_mappings}
    # Rewrite the repo-relative path into the stacktrace-relative path per mapping.
    sentry_filenames = {
        pr_filename.replace(code_mapping.source_root, code_mapping.stack_root, 1)
        for code_mapping in code_mappings
    }
    # The code-mapped filenames alone aren't enough. They don't work for the seer app, for example.
    # We can tolerate potential false positives if downstream uses of this data filter
    # out irrelevant issues.
    sentry_filenames.add(pr_filename)
    sentry_filenames.update(_left_truncated_paths(pr_filename, max_num_left_truncated_paths))
    return projects_set, sentry_filenames


def get_issues_related_to_function_names(
    *,
    organization_id: int,
    provider: str,
    external_id: str,
    filename_to_function_names: dict[str, list[str]],
    max_num_issues_per_file: int = MAX_NUM_ISSUES_PER_FILE_DEFAULT,
    run_id: int | None = None,
) -> dict[str, list[dict[str, Any]]]:
    """
    Get issues related to each file by matching inputted filename-functions with
    filename-functions in the issue's event's stacktrace.

    Assumes `filename_to_function_names` are from one repository.

    Each issue includes its latest serialized event.
    """

    try:
        repo = Repository.objects.get(
            organization_id=organization_id, provider=provider, external_id=external_id
        )
    except Repository.DoesNotExist:
        logger.exception(
            "Repo doesn't exist",
            extra={
                "organization_id": organization_id,
                "provider": provider,
                "external_id": external_id,
                "run_id": run_id,
            },
        )
        return {}

    filename_to_issues: dict[str, list[dict[str, Any]]] = {}
    for filename, function_names in filename_to_function_names.items():
        log_context = {"file": filename, "run_id": run_id}
        logger.info("Processing file", extra=log_context)

        # Only add the file to filename_to_issues if it makes it to the querying step.
        if not function_names:
            logger.warning("No function names", extra=log_context)
            continue
        logger.info("Function names", extra=log_context | {"function_names": function_names})

        projects, sentry_filenames = _get_projects_and_filenames_from_source_file(
            organization_id, repo.id, filename
        )
        if not projects:
            logger.error("No projects", extra=log_context)
            continue

        issues = get_issues_with_event_details_for_file(
            list(projects),
            list(sentry_filenames),
            list(function_names),
            max_num_issues_per_file=max_num_issues_per_file,
            run_id=run_id,
        )
        if issues:
            logger.info("Found issues", extra=log_context | {"num_issues": len(issues)})
        else:
            logger.warning("No issues found", extra=log_context)
        filename_to_issues[filename] = issues

    return filename_to_issues


def get_issues_related_to_file_patches(
    *,
    organization_id: int,
    provider: str,
    external_id: str,
    pr_files: list[PrFile],
    max_num_issues_per_file: int = MAX_NUM_ISSUES_PER_FILE_DEFAULT,
    run_id: int | None = None,
) -> dict[str, list[dict[str, Any]]]:
    """
    Get issues related to each file by matching filename-functions parsed from file patches with
    filename-functions in the issue's event's stacktrace.

    Assumes `pr_files` are from one repository.

    Each issue includes its latest serialized event.
    """

    eligible_files = safe_for_fetching_issues(pr_files)
    parsed_files = [
        PullRequestFile(filename=entry["filename"], patch=entry["patch"])
        for entry in eligible_files
    ]

    filename_to_function_names: dict[str, list[str]] = {}
    for parsed in parsed_files:
        extension = parsed.filename.rsplit(".", 1)[-1]
        if extension not in patch_parsers_more:
            logger.warning("No language parser", extra={"file": parsed.filename, "run_id": run_id})
            continue
        # Extract the names of functions touched by this file's patch.
        extracted = patch_parsers_more[extension].extract_functions_from_patch(parsed.patch)
        filename_to_function_names[parsed.filename] = list(extracted)

    return get_issues_related_to_function_names(
        organization_id=organization_id,
        provider=provider,
        external_id=external_id,
        filename_to_function_names=filename_to_function_names,
        max_num_issues_per_file=max_num_issues_per_file,
        run_id=run_id,
    )
