from typing import Any, Literal

from posthoganalytics import capture_exception
from pydantic import BaseModel, Field

from posthog.schema import MaxExperimentSummaryContext

from posthog.models import Experiment, FeatureFlag
from posthog.sync import database_sync_to_async

from ee.hogai.llm import MaxChatOpenAI
from ee.hogai.tool import MaxTool

from .prompts import EXPERIMENT_SUMMARY_BAYESIAN_PROMPT, EXPERIMENT_SUMMARY_FREQUENTIST_PROMPT

MAX_METRICS_TO_SUMMARIZE = 20


class CreateExperimentArgs(BaseModel):
    """Validated arguments for the `create_experiment` tool."""

    # Human-readable experiment name; uniqueness per team is enforced at creation time.
    name: str = Field(description="Experiment name - should clearly describe what is being tested")
    # Must reference an existing, non-deleted multivariate feature flag in the team.
    feature_flag_key: str = Field(
        description="Feature flag key (letters, numbers, hyphens, underscores only). Will create a new flag if it doesn't exist."
    )
    description: str | None = Field(
        default=None,
        description="Detailed description of the experiment hypothesis, what changes are being tested, and expected outcomes",
    )
    # Stored verbatim on the Experiment row; defaults to a backend/API ("product") experiment.
    type: Literal["product", "web"] = Field(
        default="product",
        description="Experiment type: 'product' for backend/API changes, 'web' for frontend UI changes",
    )


class CreateExperimentTool(MaxTool):
    """Max tool that creates a draft A/B test experiment backed by an existing
    multivariate feature flag."""

    name: Literal["create_experiment"] = "create_experiment"
    # NOTE: the third example bullet previously lost its closing quote to the end
    # of the **IMPORTANT** line; both are fixed below.
    description: str = """
Create a new A/B test experiment in the current project.

Experiments allow you to test changes with a controlled rollout and measure their impact.

Use this tool when the user wants to:
- Create a new A/B test experiment
- Set up a controlled experiment to test changes
- Test variants of a feature with users

Examples:
- "Create an experiment to test the new checkout flow"
- "Set up an A/B test for our pricing page redesign"
- "Create an experiment called 'homepage-cta-test' to test different call-to-action buttons"

**IMPORTANT**: You must first find or create a multivariate feature flag using `create_feature_flag`, with at least two variants (control and test).
    """.strip()
    context_prompt_template: str = "Creates a new A/B test experiment in the project"
    args_schema: type[BaseModel] = CreateExperimentArgs

    async def _arun_impl(
        self,
        name: str,
        feature_flag_key: str,
        description: str | None = None,
        type: Literal["product", "web"] = "product",
    ) -> tuple[str, dict[str, Any] | None]:
        """Create a draft experiment for the current team.

        Returns a ``(user_message, payload)`` tuple; on failure the payload
        carries an ``"error"`` key instead of experiment details.
        """
        # Cheap input validation before touching the database.
        if not name or not name.strip():
            return "Experiment name cannot be empty", {"error": "invalid_name"}

        if not feature_flag_key or not feature_flag_key.strip():
            return "Feature flag key cannot be empty", {"error": "invalid_flag_key"}

        @database_sync_to_async
        def create_experiment() -> Experiment:
            """Run all ORM work synchronously; raises ValueError for user-facing problems."""
            # Reject duplicate (non-deleted) experiment names within the team.
            existing_experiment = Experiment.objects.filter(team=self._team, name=name, deleted=False).first()
            if existing_experiment:
                raise ValueError(f"An experiment with name '{name}' already exists")

            try:
                feature_flag = FeatureFlag.objects.get(team=self._team, key=feature_flag_key, deleted=False)
            except FeatureFlag.DoesNotExist:
                raise ValueError(f"Feature flag '{feature_flag_key}' does not exist")

            # Experiments require a multivariate flag with at least control + test.
            multivariate = feature_flag.filters.get("multivariate")
            if not multivariate or not multivariate.get("variants"):
                raise ValueError(
                    f"Feature flag '{feature_flag_key}' must have multivariate variants to be used in an experiment. "
                    f"Create the flag with variants first using the create_feature_flag tool."
                )

            variants = multivariate["variants"]
            if len(variants) < 2:
                raise ValueError(
                    f"Feature flag '{feature_flag_key}' must have at least 2 variants for an experiment (e.g., control and test)"
                )

            # A flag can back at most one non-deleted experiment.
            existing_experiment_with_flag = Experiment.objects.filter(feature_flag=feature_flag, deleted=False).first()
            if existing_experiment_with_flag:
                raise ValueError(
                    f"Feature flag '{feature_flag_key}' is already used by experiment '{existing_experiment_with_flag.name}'"
                )

            # Copy the flag's variants into the experiment parameters verbatim.
            feature_flag_variants = [
                {
                    "key": variant["key"],
                    "name": variant.get("name", variant["key"]),
                    "rollout_percentage": variant["rollout_percentage"],
                }
                for variant in variants
            ]

            # Create the experiment as a draft (no start_date) so the user can
            # configure metrics before launching.
            experiment = Experiment.objects.create(
                team=self._team,
                created_by=self._user,
                name=name,
                description=description or "",
                type=type,
                feature_flag=feature_flag,
                filters={},  # Empty filters for draft
                parameters={
                    "feature_flag_variants": feature_flag_variants,
                    "minimum_detectable_effect": 30,  # default MDE until the user tunes it
                },
                metrics=[],
                metrics_secondary=[],
            )

            return experiment

        try:
            experiment = await create_experiment()
            experiment_url = f"/project/{self._team.project_id}/experiments/{experiment.id}"

            return (
                f"Successfully created experiment '{name}'. "
                f"The experiment is in draft mode - you can configure metrics and launch it at {experiment_url}",
                {
                    "experiment_id": experiment.id,
                    "experiment_name": experiment.name,
                    "feature_flag_key": feature_flag_key,
                    "type": type,
                    "url": experiment_url,
                },
            )
        except ValueError as e:
            # ValueError carries the user-facing validation messages raised above.
            return f"Failed to create experiment: {str(e)}", {"error": str(e)}
        except Exception as e:
            # Unexpected failure: report to error tracking, return a generic error code.
            capture_exception(e)
            return f"Failed to create experiment: {str(e)}", {"error": "creation_failed"}


class ExperimentSummaryArgs(BaseModel):
    """
    Analyze experiment results to generate an executive summary with key insights and recommendations.
    All experiment data and results are automatically provided from context.
    """

    # Intentionally empty: the tool takes no user-supplied arguments; all input
    # arrives through the tool's context (validated in _arun_impl).


class ExperimentSummaryOutput(BaseModel):
    """Structured output for experiment summary"""

    # Hard cap of 20 entries mirrors module-level MAX_METRICS_TO_SUMMARIZE (20);
    # keep the two values in sync if either changes.
    key_metrics: list[str] = Field(description="Summary of key metric performance", max_length=20)


# LLM-facing description for ExperimentSummaryTool: tells the model when to
# invoke the `experiment_results_summary` tool, with worked examples.
EXPERIMENT_SUMMARY_TOOL_DESCRIPTION = """
Use this tool to analyze experiment results and generate an executive summary with key insights and recommendations.
The tool processes experiment data including metrics, statistical significance, and variant performance to provide actionable insights.
It works with both Bayesian and Frequentist statistical methods and automatically adapts to the experiment's configuration.

# Examples of when to use the experiment_results_summary tool

<example>
User: Can you summarize the results of my experiment?
Assistant: I'll analyze your experiment results and provide a summary with key insights.
*Uses experiment_results_summary tool*
Assistant: Based on the analysis of your experiment results...

<reasoning>
The assistant used the experiment_results_summary tool because:
1. The user is asking for a summary of experiment results
2. The tool can analyze the statistical data and provide actionable insights
</reasoning>
</example>

<example>
User: What are the key takeaways from this A/B test?
Assistant: Let me analyze the experiment results to identify the key takeaways.
*Uses experiment_results_summary tool*
Assistant: The key takeaways from your A/B test are...

<reasoning>
The assistant used the experiment_results_summary tool because:
1. The user wants to understand the main findings from their experiment
2. The tool can extract and summarize the most important metrics and outcomes
</reasoning>
</example>
""".strip()


class ExperimentSummaryTool(MaxTool):
    """Max tool that turns experiment results (supplied via context) into an
    executive summary using an LLM with structured output."""

    # Literal type matches CreateExperimentTool's `name` declaration style.
    name: Literal["experiment_results_summary"] = "experiment_results_summary"
    description: str = EXPERIMENT_SUMMARY_TOOL_DESCRIPTION
    context_prompt_template: str = "Analyzes experiment results and generates executive summaries with key insights."

    args_schema: type[BaseModel] = ExperimentSummaryArgs

    async def _analyze_experiment(self, context: MaxExperimentSummaryContext) -> ExperimentSummaryOutput:
        """Analyze experiment and generate summary.

        Never raises: any failure is captured and surfaced as a degraded
        ExperimentSummaryOutput so the tool always returns something usable.
        """
        try:
            if context.stats_method not in ("bayesian", "frequentist"):
                raise ValueError(f"Unsupported statistical method: {context.stats_method}")

            # Pick the prompt matching the experiment's statistical methodology.
            prompt_template = (
                EXPERIMENT_SUMMARY_BAYESIAN_PROMPT
                if context.stats_method == "bayesian"
                else EXPERIMENT_SUMMARY_FREQUENTIST_PROMPT
            )

            formatted_data = self._format_experiment_for_llm(context)

            # Low temperature for deterministic, factual summaries.
            llm = MaxChatOpenAI(
                user=self._user,
                team=self._team,
                model="gpt-4.1",
                temperature=0.1,
                billable=True,
            ).with_structured_output(ExperimentSummaryOutput)

            # Prompts use a triple-brace placeholder, so a plain str.replace
            # (not str.format) is the safe substitution here.
            formatted_prompt = prompt_template.replace("{{{experiment_data}}}", formatted_data)

            analysis_result = await llm.ainvoke([{"role": "system", "content": formatted_prompt}])

            # with_structured_output may return a dict or a model instance.
            if isinstance(analysis_result, dict):
                return ExperimentSummaryOutput(**analysis_result)
            return analysis_result

        except Exception as e:
            capture_exception(
                e,
                properties={"team_id": self._team.id, "user_id": self._user.id, "experiment_id": context.experiment_id},
            )
            # Degrade gracefully: the failure reason becomes the "summary".
            return ExperimentSummaryOutput(key_metrics=[f"Analysis failed: {str(e)}"])

    def _format_experiment_for_llm(self, context: MaxExperimentSummaryContext) -> str:
        """Format experiment metadata, exposures, and metric results as plain text for LLM consumption."""
        lines: list[str] = []

        lines.append(f"Statistical method: {context.stats_method.title()}")
        lines.append(f"Experiment: {context.experiment_name}")

        if context.description:
            lines.append(f"Hypothesis: {context.description}")

        if context.variants:
            lines.append(f"\nVariants: {', '.join(context.variants)}")

        if context.exposures:
            exposures = context.exposures
            lines.append("\nExposures:")
            total = sum(exposures.values())
            lines.append(f"  Total: {int(total)}")

            for variant_key, count in exposures.items():
                # "$multiple" is a data-quality marker, reported separately below.
                if variant_key == "$multiple":
                    continue
                percentage = (count / total * 100) if total > 0 else 0
                lines.append(f"  {variant_key}: {int(count)} ({percentage:.1f}%)")

            if "$multiple" in exposures:
                multiple_count = exposures.get("$multiple", 0)
                # Guard the division like the per-variant loop above: `total`
                # can be 0 when every exposure count is 0.
                multiple_percentage = (multiple_count / total * 100) if total > 0 else 0
                lines.append(f"  $multiple: {int(multiple_count)} ({multiple_percentage:.1f}%)")
                lines.append("  [Quality Warning: Users exposed to multiple variants detected]")

        # No results at all: return just the metadata/exposure preamble.
        if not context.primary_metrics_results and not context.secondary_metrics_results:
            return "\n".join(lines)

        lines.append("\nResults:")

        def format_metrics_section(metrics: list, section_name: str) -> None:
            """Helper to format a section of metrics (primary or secondary)."""
            if not metrics:
                return

            lines.append(f"\n{section_name}:")
            for metric in metrics:
                lines.append(f"\nMetric: {metric.name}")

                if not metric.variant_results:
                    continue

                for variant in metric.variant_results:
                    lines.append(f"  {variant.key}:")

                    # Result objects differ per stats method, so probe fields defensively.
                    if context.stats_method == "bayesian":
                        if hasattr(variant, "chance_to_win") and variant.chance_to_win is not None:
                            lines.append(f"    Chance to win: {variant.chance_to_win:.1%}")

                        if hasattr(variant, "credible_interval") and variant.credible_interval:
                            ci_low, ci_high = variant.credible_interval[:2]
                            lines.append(f"    95% credible interval: {ci_low:.1%} - {ci_high:.1%}")

                        lines.append(f"    Significant: {'Yes' if variant.significant else 'No'}")
                    else:
                        if hasattr(variant, "p_value") and variant.p_value is not None:
                            lines.append(f"    P-value: {variant.p_value:.4f}")

                        if hasattr(variant, "confidence_interval") and variant.confidence_interval:
                            ci_low, ci_high = variant.confidence_interval[:2]
                            lines.append(f"    95% confidence interval: {ci_low:.1%} - {ci_high:.1%}")

                        lines.append(f"    Significant: {'Yes' if variant.significant else 'No'}")

        # NOTE(review): capped at 10 metrics per section, but module-level
        # MAX_METRICS_TO_SUMMARIZE is 20 and unused — confirm which limit is intended.
        format_metrics_section(context.primary_metrics_results[:10], "Primary Metrics")
        format_metrics_section(context.secondary_metrics_results[:10], "Secondary Metrics")

        return "\n".join(lines)

    def _format_summary_for_user(self, summary: ExperimentSummaryOutput, experiment_name: str) -> str:
        """Format the structured summary into a user-friendly markdown message."""
        lines = []
        lines.append(f"✅ **Experiment Summary: '{experiment_name}'**")

        if summary.key_metrics:
            lines.append("\n**📊 Key Metrics:**")
            for metric in summary.key_metrics:
                lines.append(f"• {metric}")

        return "\n".join(lines)

    async def _arun_impl(self) -> tuple[str, dict[str, Any]]:
        """Validate the tool context, run the LLM analysis, and return a
        ``(user_message, payload)`` tuple. All failures are reported rather than raised."""
        try:
            try:
                validated_context = MaxExperimentSummaryContext(**self.context)
            except Exception as e:
                error_details = str(e)
                error_context = {
                    "error": "invalid_context",
                    "details": error_details,
                }

                # Surface the underlying cause of chained validation errors.
                if hasattr(e, "__cause__") and e.__cause__:
                    error_context["validation_cause"] = str(e.__cause__)

                capture_exception(
                    e,
                    properties={
                        "team_id": self._team.id,
                        "user_id": self._user.id,
                        "context_keys": list(self.context.keys()) if isinstance(self.context, dict) else None,
                        "experiment_id": self.context.get("experiment_id") if isinstance(self.context, dict) else None,
                    },
                )

                return f"❌ Invalid experiment context: {error_details}", error_context

            # Nothing to summarize without at least one metrics result set.
            if not validated_context.primary_metrics_results and not validated_context.secondary_metrics_results:
                return "❌ No experiment results to analyze", {
                    "error": "no_results",
                    "details": "No metrics results provided in context",
                }

            summary_result = await self._analyze_experiment(validated_context)
            user_message = self._format_summary_for_user(summary_result, validated_context.experiment_name)

            return user_message, {
                "experiment_id": validated_context.experiment_id,
                "experiment_name": validated_context.experiment_name,
                "summary": summary_result.model_dump(),
            }

        except Exception as e:
            # Last-resort guard: never let the tool raise into the agent loop.
            capture_exception(e, properties={"team_id": self._team.id, "user_id": self._user.id})
            return f"❌ Failed to summarize experiment: {str(e)}", {"error": "summary_failed", "details": str(e)}
