"""
Data analysis and visualization prompts for FastMCP
"""

from fastmcp import FastMCP, Context
from typing import List, Optional, Dict, Any
from pydantic import Field


class DataPrompts:
    """Data analysis and visualization prompts for FastMCP.

    Holds a reference to a :class:`FastMCP` server and, on :meth:`register`,
    attaches four prompt generators (visualization, statistics, ML project
    planning, and data quality assessment) to it.
    """

    def __init__(self, mcp: FastMCP):
        """Store the FastMCP server; call :meth:`register` to attach prompts.

        Args:
            mcp: The FastMCP server instance the prompts will be registered on.
        """
        self.mcp = mcp

    def register(self) -> None:
        """Register all data analysis and visualization prompts with the server."""
        self.mcp.prompt(
            name="data_visualization",
            description="Generate a prompt for creating data visualizations",
            tags={"data", "visualization", "analysis"},
        )(self.data_visualization)

        self.mcp.prompt(
            name="statistical_analysis",
            description="Generate a prompt for comprehensive statistical analysis",
            tags={"data", "statistics", "analysis"},
        )(self.statistical_analysis)

        self.mcp.prompt(
            name="machine_learning_project",
            description="Generate a prompt for machine learning project planning and implementation",
            tags={"data", "machine-learning", "modeling"},
        )(self.machine_learning_project)

        self.mcp.prompt(
            name="data_quality_assessment",
            description="Generate a prompt for comprehensive data quality evaluation",
            tags={"data", "quality", "validation"},
        )(self.data_quality_assessment)

    @staticmethod
    def _as_list(value: Any, default: Optional[List[str]] = None) -> List[str]:
        """Normalize a parameter value to a real list.

        When these methods are invoked directly (bypassing FastMCP's pydantic
        validation), unset parameters arrive as ``FieldInfo`` objects rather
        than their declared defaults. This helper makes iteration safe in both
        call paths.

        Args:
            value: The raw parameter value (a list, or a ``FieldInfo``).
            default: Fallback returned when *value* is not a list; an empty
                list is used when no fallback is given.

        Returns:
            ``value`` itself if it is a list, otherwise the fallback.
        """
        if isinstance(value, list):
            return value
        return default if default is not None else []

    async def data_visualization(
        self,
        ctx: Context,
        data_description: str = Field(
            description="Description of the data to be visualized"
        ),
        visualization_goal: str = Field(description="What insights you want to convey"),
        chart_types: List[str] = Field(
            default_factory=list, description="Preferred chart types or styles"
        ),
        audience: str = Field(
            default="general", description="Target audience for the visualization"
        ),
    ) -> str:
        """Generate a prompt for data visualization guidance.

        Args:
            ctx: FastMCP request context (used for progress logging).
            data_description: Description of the data to be visualized.
            visualization_goal: The insight the visualization should convey.
            chart_types: Optional preferred chart types; when empty, the prompt
                asks the model to recommend suitable ones.
            audience: Target audience the guidance should be tailored to.

        Returns:
            A fully rendered prompt string.
        """
        await ctx.info(f"Creating data visualization prompt for: {data_description}")

        # Guard against FieldInfo defaults when called outside FastMCP.
        chart_list = self._as_list(chart_types)
        chart_preferences = (
            "\n".join(f"- {chart}" for chart in chart_list)
            if chart_list
            else "Recommend appropriate chart types based on the data and goals"
        )

        prompt = f"""Create effective data visualizations for the following:

**Data Description**: {data_description}

**Visualization Goal**: {visualization_goal}

**Target Audience**: {audience}

**Chart Type Preferences**:
{chart_preferences}

**Visualization Strategy**:

1. **Data Analysis and Preparation**
   - Analyze the structure and characteristics of the data
   - Identify key variables and relationships
   - Clean and prepare data for visualization
   - Determine the most important insights to highlight

2. **Chart Selection**
   - Recommend the most appropriate chart types for the data
   - Justify why each chart type is suitable
   - Consider alternative visualization approaches
   - Think about interactive vs. static visualizations

3. **Design Principles**
   - Apply visual design best practices
   - Choose appropriate colors, fonts, and layouts
   - Ensure accessibility for different audiences
   - Maintain clarity and avoid chart junk

4. **Visualization Implementation**
   - Provide code or step-by-step instructions for creating the charts
   - Include proper labeling, titles, and legends
   - Add appropriate annotations and callouts
   - Ensure scales and axes are properly configured

5. **Storytelling Elements**
   - Structure visualizations to tell a clear story
   - Use progressive disclosure for complex data
   - Include context and explanatory text
   - Guide the viewer's attention to key insights

6. **Technical Considerations**
   - Recommend appropriate tools or libraries
   - Consider performance for large datasets
   - Plan for responsive design if needed
   - Think about export and sharing requirements

7. **Validation and Testing**
   - Review visualizations for accuracy
   - Test with representative users
   - Gather feedback and iterate
   - Verify that insights are clearly communicated

**Additional Guidelines**:
- Tailor complexity and detail level to {audience}
- Ensure visualizations serve the goal: {visualization_goal}
- Consider cultural and accessibility factors
- Plan for different viewing contexts (presentation, report, dashboard)

Provide specific, actionable guidance for creating impactful data visualizations."""

        return prompt

    def statistical_analysis(
        self,
        dataset_description: str = Field(
            description="Description of the dataset to analyze"
        ),
        research_questions: List[str] = Field(
            description="Specific questions to investigate"
        ),
        analysis_type: str = Field(
            description="Type of analysis needed (descriptive, inferential, predictive)"
        ),
        significance_level: float = Field(
            default=0.05, description="Significance level for statistical tests"
        ),
    ) -> str:
        """Generate a prompt for statistical analysis.

        Args:
            dataset_description: Description of the dataset to analyze.
            research_questions: Specific questions the analysis must address.
            analysis_type: Kind of analysis (descriptive, inferential, predictive).
            significance_level: Alpha level used for hypothesis tests.

        Returns:
            A fully rendered prompt string.
        """
        # Guard against FieldInfo defaults when called outside FastMCP; the
        # other prompt methods already apply this guard, and iterating a raw
        # FieldInfo here would raise TypeError.
        questions = self._as_list(research_questions)
        questions_text = "\n".join(
            f"{i}. {question}" for i, question in enumerate(questions, start=1)
        )
        # Same guard for the numeric parameter: fall back to the declared
        # default rather than interpolating a FieldInfo repr into the prompt.
        alpha = (
            significance_level
            if isinstance(significance_level, (int, float))
            else 0.05
        )

        prompt = f"""Perform a comprehensive statistical analysis of the following dataset:

**Dataset Description**: {dataset_description}

**Research Questions**:
{questions_text}

**Analysis Type**: {analysis_type}
**Significance Level**: {alpha}

**Statistical Analysis Framework**:

1. **Exploratory Data Analysis (EDA)**
   - Examine data structure, types, and completeness
   - Generate descriptive statistics for all variables
   - Identify outliers, missing values, and data quality issues
   - Create distribution plots and correlation matrices

2. **Data Preparation**
   - Handle missing values appropriately
   - Address outliers and anomalies
   - Transform variables if necessary (log, standardization, etc.)
   - Create derived variables or features as needed

3. **Descriptive Statistics**
   - Calculate measures of central tendency and variability
   - Examine distributions and their characteristics
   - Identify patterns and relationships in the data
   - Summarize key findings from descriptive analysis

4. **Inferential Analysis**
   - Choose appropriate statistical tests for each research question
   - Check assumptions for selected tests
   - Perform hypothesis testing with α = {alpha}
   - Calculate confidence intervals where appropriate

5. **Model Development** (if applicable)
   - Select appropriate modeling techniques
   - Split data into training and testing sets
   - Build and validate models
   - Assess model performance and assumptions

6. **Results Interpretation**
   - Interpret statistical test results in context
   - Discuss practical significance vs. statistical significance
   - Address limitations and potential biases
   - Connect findings back to original research questions

7. **Reporting and Visualization**
   - Create clear tables and figures for results
   - Use appropriate statistical notation
   - Provide executive summary of key findings
   - Include recommendations based on analysis

**Specific Deliverables**:
- Summary statistics table
- Relevant visualizations (histograms, box plots, scatter plots)
- Statistical test results with interpretation
- Confidence intervals and effect sizes
- Conclusions and recommendations

**Technical Requirements**:
- Use appropriate statistical software or programming language
- Document all assumptions and methodological choices
- Provide reproducible analysis code
- Include diagnostic plots and assumption checks

Ensure the analysis directly addresses each research question and provides actionable insights."""

        return prompt

    def machine_learning_project(
        self,
        problem_type: str = Field(
            description="ML problem type (classification, regression, clustering, etc.)"
        ),
        business_objective: str = Field(
            description="Business goal the ML solution should achieve"
        ),
        available_data: str = Field(
            description="Description of available data and features"
        ),
        success_metrics: List[str] = Field(description="How success will be measured"),
        constraints: List[str] = Field(
            default_factory=list, description="Any constraints or limitations"
        ),
    ) -> str:
        """Generate a prompt for machine learning project development.

        Args:
            problem_type: ML problem type (classification, regression, etc.).
            business_objective: Business goal the solution should achieve.
            available_data: Description of the available data and features.
            success_metrics: Criteria by which success will be measured.
            constraints: Optional constraints or limitations on the project.

        Returns:
            A fully rendered prompt string.
        """
        # Guard against FieldInfo defaults when called outside FastMCP.
        metrics_list = self._as_list(success_metrics)
        constraints_list = self._as_list(constraints)

        metrics_text = "\n".join(f"- {metric}" for metric in metrics_list)
        constraints_text = (
            "\n".join(f"- {constraint}" for constraint in constraints_list)
            if constraints_list
            else "No specific constraints mentioned"
        )

        prompt = f"""Develop a comprehensive machine learning solution for the following:

**Problem Type**: {problem_type}
**Business Objective**: {business_objective}
**Available Data**: {available_data}

**Success Metrics**:
{metrics_text}

**Constraints**:
{constraints_text}

**ML Project Development Framework**:

1. **Problem Definition and Scope**
   - Clearly define the machine learning problem
   - Establish success criteria and evaluation metrics
   - Identify stakeholders and requirements
   - Set realistic expectations and timelines

2. **Data Assessment and Preparation**
   - Evaluate data quality, completeness, and relevance
   - Identify potential data sources and collection methods
   - Plan data cleaning and preprocessing steps
   - Address class imbalance, missing values, and outliers

3. **Exploratory Data Analysis**
   - Understand data distributions and patterns
   - Identify correlations and feature relationships
   - Detect potential biases or data leakage
   - Visualize key insights about the data

4. **Feature Engineering**
   - Create relevant features from raw data
   - Apply domain knowledge to generate meaningful variables
   - Handle categorical variables and text data
   - Consider feature scaling and normalization

5. **Model Selection and Development**
   - Choose appropriate algorithms for the {problem_type} problem
   - Implement baseline models for comparison
   - Experiment with multiple modeling approaches
   - Consider ensemble methods and advanced techniques

6. **Model Training and Validation**
   - Split data appropriately (train/validation/test)
   - Implement cross-validation strategies
   - Tune hyperparameters systematically
   - Monitor for overfitting and underfitting

7. **Model Evaluation**
   - Assess model performance using appropriate metrics
   - Analyze errors and model limitations
   - Compare models fairly and objectively
   - Validate results on holdout test set

8. **Model Interpretation and Explainability**
   - Understand which features drive predictions
   - Generate model explanations for stakeholders
   - Assess fairness and bias in model predictions
   - Document model behavior and limitations

9. **Deployment Planning**
   - Design model serving architecture
   - Plan for model monitoring and maintenance
   - Consider scalability and performance requirements
   - Develop rollback and update strategies

10. **Risk Assessment and Mitigation**
    - Identify potential risks and failure modes
    - Plan for edge cases and data drift
    - Establish monitoring and alerting systems
    - Create contingency plans

**Technical Implementation**:
- Use appropriate ML libraries and frameworks
- Implement reproducible code with version control
- Document all experimental choices and results
- Create automated pipelines where possible

**Business Integration**:
- Align technical solution with business objectives
- Communicate results in business terms
- Provide actionable insights and recommendations
- Plan for user adoption and change management

Develop a solution that balances technical excellence with practical business value."""

        return prompt

    def data_quality_assessment(
        self,
        data_source: str = Field(
            description="Source or description of the data to assess"
        ),
        intended_use: str = Field(description="How the data will be used"),
        quality_dimensions: List[str] = Field(
            default_factory=lambda: [
                "completeness",
                "accuracy",
                "consistency",
                "timeliness",
            ],
            description="Quality dimensions to evaluate",
        ),
        critical_fields: List[str] = Field(
            default_factory=list, description="Most important fields or columns"
        ),
    ) -> str:
        """Generate a prompt for data quality assessment.

        Args:
            data_source: Source or description of the data to assess.
            intended_use: How the data will be used downstream.
            quality_dimensions: Quality dimensions to evaluate; defaults to
                completeness, accuracy, consistency, and timeliness.
            critical_fields: Optional list of the most important fields.

        Returns:
            A fully rendered prompt string.
        """
        # Guard against FieldInfo defaults when called outside FastMCP. For
        # the dimensions, fall back to the declared defaults instead of an
        # empty list so the "Quality Dimensions" section is never blank.
        dimensions_list = self._as_list(
            quality_dimensions,
            default=["completeness", "accuracy", "consistency", "timeliness"],
        )
        fields_list = self._as_list(critical_fields)

        dimensions_text = "\n".join(f"- {dim.title()}" for dim in dimensions_list)
        critical_fields_text = (
            "\n".join(f"- {field}" for field in fields_list)
            if fields_list
            else "All fields equally important"
        )

        prompt = f"""Conduct a comprehensive data quality assessment for:

**Data Source**: {data_source}
**Intended Use**: {intended_use}

**Quality Dimensions to Evaluate**:
{dimensions_text}

**Critical Fields**:
{critical_fields_text}

**Data Quality Assessment Framework**:

1. **Data Profiling**
   - Analyze data structure and schema
   - Generate statistical summaries for each field
   - Identify data types, formats, and patterns
   - Document data lineage and collection methods

2. **Completeness Assessment**
   - Calculate missing value percentages by field
   - Identify patterns in missing data
   - Assess impact of missing values on intended use
   - Recommend strategies for handling missing data

3. **Accuracy Evaluation**
   - Validate data against known standards or sources
   - Check for logical inconsistencies and impossible values
   - Identify potential data entry errors
   - Assess accuracy of critical fields and calculations

4. **Consistency Analysis**
   - Check for format consistency within fields
   - Identify duplicate records and near-duplicates
   - Validate relationships between related fields
   - Assess consistency across different data sources

5. **Timeliness Review**
   - Evaluate data freshness and currency
   - Identify lag times in data updates
   - Assess temporal consistency and trends
   - Determine if data meets timeliness requirements

6. **Validity Verification**
   - Check adherence to business rules and constraints
   - Validate against predefined ranges and formats
   - Identify outliers and anomalous values
   - Assess conformance to data standards

7. **Uniqueness Analysis**
   - Identify and quantify duplicate records
   - Assess uniqueness of key identifier fields
   - Analyze impact of duplicates on analysis
   - Recommend deduplication strategies

8. **Data Quality Scoring**
   - Develop quality scores for each dimension
   - Create overall data quality metrics
   - Prioritize quality issues by business impact
   - Establish quality benchmarks and thresholds

**Quality Report Structure**:

1. **Executive Summary**
   - Overall data quality assessment
   - Key findings and recommendations
   - Business impact assessment
   - Priority actions required

2. **Detailed Findings**
   - Quality assessment by dimension
   - Field-level analysis and issues
   - Statistical summaries and visualizations
   - Specific examples of quality problems

3. **Impact Analysis**
   - How quality issues affect intended use: {intended_use}
   - Risk assessment for different quality problems
   - Cost-benefit analysis of quality improvements
   - Timeline for addressing critical issues

4. **Recommendations**
   - Data cleaning and improvement strategies
   - Process improvements to prevent future issues
   - Tools and technologies for ongoing monitoring
   - Governance and quality control measures

5. **Implementation Plan**
   - Prioritized action items with timelines
   - Resource requirements and responsibilities
   - Success metrics and monitoring approach
   - Quality maintenance procedures

**Technical Deliverables**:
- Data quality dashboard or scorecard
- Automated quality checking scripts
- Data profiling reports and visualizations
- Quality improvement recommendations with code examples

Provide actionable insights that enable data-driven decision making about data fitness for purpose."""

        return prompt