"""Structural Text Analyzer MCP Server - Main Server Implementation"""

import asyncio
import logging
import os
import sys
import tempfile
import json
from typing import Dict, Any, Optional
import argparse
from pathlib import Path

from fastmcp import FastMCP
from dotenv import load_dotenv

# Configure logging (INFO level; timestamped, per-module logger names)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Load environment variables from .env file.
# The file is looked up three directory levels above this module —
# presumably the repository root; TODO confirm against the project layout.
env_path = Path(__file__).parent.parent.parent / '.env'
if env_path.exists():
    load_dotenv(env_path)
    logger.info(f"Loaded environment variables from {env_path}")
else:
    logger.info("No .env file found, using default settings")

# Create MCP server instance (tools below register against it via @mcp.tool)
mcp = FastMCP("Structural Text Analyzer Server")

# Import analyzer (will be done when server starts)
# Shared default analyzer; populated by initialize_analyzer() in main().
analyzer_instance: Optional[Any] = None

# Default configuration from environment variables
DEFAULT_ANALYSIS_TYPE = os.getenv('ANALYZER_TYPE', 'default')
# NOTE(review): DEFAULT_OUTPUT_DIR and DEFAULT_OUTPUT_FORMAT are defined but
# never referenced in this module — presumably consumed elsewhere; verify.
DEFAULT_OUTPUT_DIR = os.getenv('ANALYZER_OUTPUT_DIR', './output')
DEFAULT_OUTPUT_FORMAT = os.getenv('ANALYZER_OUTPUT_FORMAT', 'markdown')


def initialize_analyzer() -> None:
    """Create the module-level analyzer instance.

    Tries three import styles so the module works whether it runs as part of
    a package (relative import), as a loose script (module directory on
    sys.path), or as the installed ``structural_analyzer`` package.

    Raises:
        RuntimeError: If the analyzer class cannot be imported. The original
            ImportError is chained as ``__cause__`` for easier debugging.
    """
    global analyzer_instance

    # Ensure this module's directory is importable so the plain
    # `from analyzer import ...` fallback below can succeed.
    current_dir = str(Path(__file__).parent)
    if current_dir not in sys.path:
        sys.path.insert(0, current_dir)

    try:
        # Preferred: relative import when loaded as a package submodule.
        try:
            from .analyzer import StructuralTextAnalyzer
        except ImportError:
            try:
                # Script mode: module directory was added to sys.path above.
                from analyzer import StructuralTextAnalyzer
            except ImportError:
                # Installed-package mode.
                from structural_analyzer.analyzer import StructuralTextAnalyzer
    except ImportError as e:
        logger.error(f"Failed to import analyzer: {e}")
        # RuntimeError is still an Exception, so existing callers that catch
        # Exception keep working; `from e` preserves the failure chain.
        raise RuntimeError(f"Could not initialize analyzer: {e}") from e

    analyzer_instance = StructuralTextAnalyzer()

    logger.info("Initialized Structural Text Analyzer")


def get_analyzer(analysis_type: Optional[str] = None) -> Any:
    """Return an analyzer for the requested analysis type.

    Args:
        analysis_type: Named analyzer configuration (e.g. "computational_log").
            ``None``, ``""``, or ``"default"`` returns the shared default
            instance created by initialize_analyzer().

    Returns:
        The shared default analyzer, or a freshly constructed type-specific
        analyzer (type-specific instances are created per call, not cached).

    Raises:
        RuntimeError: If initialize_analyzer() has not been called yet.
    """
    if analyzer_instance is None:
        raise RuntimeError("Analyzer not initialized")

    if analysis_type and analysis_type != 'default':
        # Same three-way import fallback as initialize_analyzer().
        try:
            from .analyzer import StructuralTextAnalyzer
        except ImportError:
            try:
                from analyzer import StructuralTextAnalyzer
            except ImportError:
                from structural_analyzer.analyzer import StructuralTextAnalyzer
        return StructuralTextAnalyzer.from_text_type(analysis_type)

    return analyzer_instance


@mcp.tool
def analyze_text_content(text: str, analysis_type: str = "", output_format: str = "summary") -> str:
    """Analyze text content directly for structural patterns, numerical anomalies, and trends
    
    Args:
        text: Text content to analyze
        analysis_type: Type of analysis ("computational_log", "academic_paper", "social_text", or "default")
        output_format: Level of detail ("summary", "detailed", "json")
        
    Returns:
        Analysis results formatted according to output_format; on failure, a
        human-readable error message (MCP tools report errors as text).
    """
    try:
        # An empty analysis_type falls back to the env-configured default.
        actual_analysis_type = analysis_type or DEFAULT_ANALYSIS_TYPE
        analyzer = get_analyzer(actual_analysis_type)
        
        logger.info(f"Analyzing text content ({len(text)} chars) with type: {actual_analysis_type}")
        
        # Perform analysis. NOTE(review): analyze_text appears to store state
        # consumed by get_summary()/generate_report() below — confirm in analyzer.
        results = analyzer.analyze_text(text, "mcp_analysis")
        
        # Format output based on request
        if output_format == "json":
            # default=str stringifies values json cannot serialize natively.
            return json.dumps(results, indent=2, default=str)
        elif output_format == "detailed":
            # Generate the report into a throwaway directory, then restore the
            # analyzer's previous output_dir so the shared instance is not left
            # pointing at a temp directory that is deleted on context exit.
            with tempfile.TemporaryDirectory() as temp_dir:
                previous_output_dir = getattr(analyzer, 'output_dir', None)
                analyzer.output_dir = temp_dir
                try:
                    report_path = analyzer.generate_report()
                    with open(report_path, 'r', encoding='utf-8') as f:
                        return f.read()
                finally:
                    analyzer.output_dir = previous_output_dir
        else:
            # Summary format (the default)
            summary = analyzer.get_summary()
            
            summary_text = f"""# Text Analysis Summary

## File Information
- Processing time: {summary['file_info']['processing_time']:.3f} seconds
- Size: {len(text):,} characters

## Text Metrics
- Characters: {summary['text_metrics']['characters']:,}
- Words: {summary['text_metrics']['words']:,}
- Sentences: {summary['text_metrics']['sentences']:,}
- Vocabulary: {summary['text_metrics']['vocabulary_size']:,} unique words
- Lexical diversity: {summary['text_metrics']['lexical_diversity']:.4f}

## Numerical Analysis
- Numbers found: {summary['numerical_metrics']['total_numbers']:,}
- Valid numbers: {summary['numerical_metrics']['valid_numbers']:,}
- Health score: {summary['numerical_metrics']['health_score']:.3f}/1.000

## Trend Analysis
- Sequences analyzed: {summary['trend_metrics']['sequences_analyzed']}
- Convergent sequences: {summary['trend_metrics']['convergent_sequences']}
- Divergent sequences: {summary['trend_metrics']['divergent_sequences']}

## Health Assessment
{_format_health_status(summary['numerical_metrics']['health_score'])}
"""
            
            return summary_text
            
    except Exception as e:
        # Broad catch is deliberate: MCP tools return errors as text rather
        # than letting exceptions escape into the transport layer.
        logger.error(f"Text analysis failed: {str(e)}")
        return f"Text analysis failed: {str(e)}"


@mcp.tool
def analyze_text_file(file_path: str, analysis_type: str = "", output_format: str = "summary") -> str:
    """Analyze a text file for structural patterns, numerical anomalies, and trends
    
    Args:
        file_path: Path to the text file to analyze
        analysis_type: Type of analysis ("computational_log", "academic_paper", "social_text", or "default")
        output_format: Level of detail ("summary", "detailed", "json")
        
    Returns:
        Analysis results formatted according to output_format; on failure, a
        human-readable error message (MCP tools report errors as text).
    """
    try:
        # Validate file exists before handing it to the analyzer.
        if not os.path.exists(file_path):
            return f"Error: File not found: {file_path}"
        
        # An empty analysis_type falls back to the env-configured default.
        actual_analysis_type = analysis_type or DEFAULT_ANALYSIS_TYPE
        analyzer = get_analyzer(actual_analysis_type)
        
        logger.info(f"Analyzing file: {file_path} with type: {actual_analysis_type}")
        
        # Perform analysis
        results = analyzer.analyze_file(file_path, "mcp_file_analysis")
        
        # Format output similar to analyze_text_content
        if output_format == "json":
            # default=str stringifies values json cannot serialize natively.
            return json.dumps(results, indent=2, default=str)
        elif output_format == "detailed":
            # Generate the report into a throwaway directory, then restore the
            # analyzer's previous output_dir so the shared instance is not left
            # pointing at a temp directory that is deleted on context exit.
            with tempfile.TemporaryDirectory() as temp_dir:
                previous_output_dir = getattr(analyzer, 'output_dir', None)
                analyzer.output_dir = temp_dir
                try:
                    report_path = analyzer.generate_report()
                    with open(report_path, 'r', encoding='utf-8') as f:
                        return f.read()
                finally:
                    analyzer.output_dir = previous_output_dir
        else:
            # Summary format (the default)
            summary = analyzer.get_summary()
            
            summary_text = f"""# File Analysis Summary: {os.path.basename(file_path)}

## File Information
- Filename: {summary['file_info']['filename']}
- Size: {summary['file_info']['size_bytes']:,} bytes
- Processing time: {summary['file_info']['processing_time']:.3f} seconds

## Text Metrics
- Characters: {summary['text_metrics']['characters']:,}
- Words: {summary['text_metrics']['words']:,}
- Sentences: {summary['text_metrics']['sentences']:,}
- Vocabulary: {summary['text_metrics']['vocabulary_size']:,} unique words
- Lexical diversity: {summary['text_metrics']['lexical_diversity']:.4f}

## Numerical Analysis
- Numbers found: {summary['numerical_metrics']['total_numbers']:,}
- Valid numbers: {summary['numerical_metrics']['valid_numbers']:,}
- Health score: {summary['numerical_metrics']['health_score']:.3f}/1.000

## Trend Analysis
- Sequences analyzed: {summary['trend_metrics']['sequences_analyzed']}
- Convergent sequences: {summary['trend_metrics']['convergent_sequences']}
- Divergent sequences: {summary['trend_metrics']['divergent_sequences']}

## Health Assessment
{_format_health_status(summary['numerical_metrics']['health_score'])}
"""
            
            return summary_text
            
    except Exception as e:
        # Broad catch is deliberate: MCP tools return errors as text rather
        # than letting exceptions escape into the transport layer.
        logger.error(f"File analysis failed: {str(e)}")
        return f"File analysis failed: {str(e)}"


@mcp.tool
def extract_numerical_patterns(text: str, detect_anomalies: bool = True) -> str:
    """Extract and analyze numerical patterns from text
    
    Args:
        text: Text content to analyze for numerical patterns
        detect_anomalies: Whether to detect numerical anomalies (NaN, Inf, etc.)
        
    Returns:
        Numerical pattern analysis results as a markdown report, or an error
        message string on failure
    """
    try:
        # Always uses the shared default analyzer (no analysis_type override).
        analyzer = get_analyzer()
        
        logger.info(f"Extracting numerical patterns from text ({len(text)} chars)")
        
        # Extract numerical patterns
        num_results = analyzer.numerical_analyzer.analyze(text)
        
        # Each report section may be absent from the results, hence .get().
        summary = num_results.get('summary', {})
        distribution = num_results.get('distribution', {})
        # Anomaly data is only consulted when the caller asked for it.
        anomalies = num_results.get('anomalies', {}) if detect_anomalies else {}
        
        result_text = f"""# Numerical Pattern Analysis

## Numbers Detected
- Total numbers found: {summary.get('total_numbers_found', 0):,}
- Valid numbers: {summary.get('valid_numbers', 0):,}
- Invalid numbers: {summary.get('invalid_numbers', 0):,}
- Scientific notation: {summary.get('scientific_notation_count', 0):,}

## Statistical Distribution
"""
        
        # Distribution stats are skipped entirely when no numbers were found.
        if distribution:
            result_text += f"""- Mean: {distribution.get('mean', 0):.6e}
- Median: {distribution.get('median', 0):.6e}
- Standard deviation: {distribution.get('std', 0):.6e}
- Range: [{distribution.get('min', 0):.6e}, {distribution.get('max', 0):.6e}]
- Positive numbers: {distribution.get('positives_count', 0):,}
- Negative numbers: {distribution.get('negatives_count', 0):,}
- Zero values: {distribution.get('zeros_count', 0):,}
"""
        
        if detect_anomalies and anomalies:
            anomaly_summary = anomalies.get('summary', {})
            anomaly_types = anomaly_summary.get('anomaly_types', {})
            
            result_text += f"""
## Anomaly Detection
- Total anomalies: {anomaly_summary.get('total_anomalies', 0):,}
- Anomaly rate: {anomaly_summary.get('anomaly_rate', 0)*100:.2f}%

### Anomaly Breakdown:
"""
            # Only list anomaly categories that actually occurred.
            for anomaly_type, count in anomaly_types.items():
                if count > 0:
                    result_text += f"- {anomaly_type.replace('_', ' ').title()}: {count:,}\n"
            
            health_score = num_results.get('health_score', 0)
            result_text += f"""
## Health Assessment
- Health score: {health_score:.3f}/1.000
- Status: {_format_health_status(health_score)}
"""
        
        return result_text
        
    except Exception as e:
        # Broad catch is deliberate: MCP tools return errors as text.
        logger.error(f"Numerical pattern extraction failed: {str(e)}")
        return f"Numerical pattern extraction failed: {str(e)}"


@mcp.tool
def analyze_convergence_trends(text: str, sequence_patterns: str = "") -> str:
    """Analyze convergence and trend patterns in numerical sequences
    
    Args:
        text: Text containing numerical sequences to analyze
        sequence_patterns: Optional comma-separated regex patterns to extract specific sequences
        
    Returns:
        Convergence and trend analysis results as a markdown report, or an
        error message string on failure
    """
    try:
        # Always uses the shared default analyzer (no analysis_type override).
        analyzer = get_analyzer()
        
        logger.info(f"Analyzing convergence trends in text ({len(text)} chars)")
        
        # Extract sequences: custom regex patterns if provided, else defaults.
        if sequence_patterns:
            patterns = [p.strip() for p in sequence_patterns.split(',')]
            sequences = analyzer.numerical_analyzer.extract_sequences(text, patterns)
        else:
            sequences = analyzer.numerical_analyzer.extract_sequences(text)
        
        if not sequences:
            return "No numerical sequences found for trend analysis."
        
        # Analyze trends across all extracted sequences at once.
        trend_results = analyzer.trend_analyzer.analyze_multiple_sequences(sequences)
        
        # Format results
        result_text = f"""# Convergence and Trend Analysis

## Sequences Found: {len(sequences)}

"""
        
        for seq_name, seq_analysis in trend_results.items():
            # The cross-sequence summary is rendered separately below.
            if seq_name == 'comparative_analysis':
                continue
            
            if isinstance(seq_analysis, dict):
                # Get sequence metadata for better description
                metadata = seq_analysis.get('sequence_metadata', {})
                description = metadata.get('description', 'Numerical sequence')
                sequence_type = metadata.get('sequence_type', 'general_numeric')
                
                # Map each known sequence type to a visual marker for the report.
                type_emoji = {
                    'convergence_metric': '🎯',
                    'error_metric': '⚠️',
                    'algorithm_parameter': '⚙️',
                    'performance_metric': '📈',
                    'iteration_progress': '🔄',
                    'convergence_rate': '📊',
                    'general_numeric': '📋'
                }.get(sequence_type, '📊')
                
                readable_name = seq_name.replace('_', ' ').title()
                trend_type = seq_analysis.get('trend_type', 'unknown')
                data_points = seq_analysis.get('data_points', 0)
                
                result_text += f"""### {type_emoji} Sequence: {readable_name}
- Description: {description}
- Data points: {data_points}
- Trend type: {trend_type.upper()}
"""
                
                # Add convergence info if available
                convergence = seq_analysis.get('convergence_analysis', {})
                if convergence:
                    # NOTE(review): conv_rate is assumed to be a string here
                    # (.upper() below) — a non-string value would be caught by
                    # the outer except and reported as a failure. Verify.
                    conv_rate = convergence.get('convergence_rate', 'unknown')
                    conv_ratio = convergence.get('convergence_ratio', 'N/A')
                    
                    result_text += f"- Convergence rate: {conv_rate.upper()}\n"
                    # Ratio may be the 'N/A' placeholder; only format numbers.
                    if isinstance(conv_ratio, (int, float)):
                        result_text += f"- Convergence ratio: {conv_ratio:.6f}\n"
                
                # Add trend metrics
                trend_metrics = seq_analysis.get('trend_metrics', {})
                if trend_metrics:
                    result_text += f"- Confidence: {trend_metrics.get('confidence_score', 0):.3f}\n"
                    result_text += f"- Stability: {trend_metrics.get('stability_score', 0):.3f}\n"
                
                result_text += "\n"
        
        # Add comparative analysis if available
        if 'comparative_analysis' in trend_results:
            comp = trend_results['comparative_analysis']
            result_text += f"""## Comparative Analysis
- Total sequences: {comp.get('sequence_count', 0)}
- Average length: {comp.get('length_statistics', {}).get('mean_length', 0):.1f}
"""
            
            conv_comp = comp.get('convergence_comparison', {})
            if conv_comp:
                result_text += f"- Best convergence ratio: {conv_comp.get('best_convergence', 0):.6f}\n"
                result_text += f"- Mean convergence ratio: {conv_comp.get('mean_convergence_ratio', 0):.6f}\n"
        
        return result_text
        
    except Exception as e:
        # Broad catch is deliberate: MCP tools return errors as text.
        logger.error(f"Convergence trend analysis failed: {str(e)}")
        return f"Convergence trend analysis failed: {str(e)}"


@mcp.tool
def assess_text_quality(text: str, include_recommendations: bool = True) -> str:
    """Assess text quality including readability, complexity, and structure
    
    Args:
        text: Text content to assess
        include_recommendations: Whether to include improvement recommendations
        
    Returns:
        Text quality assessment results as a markdown report, or an error
        message string on failure
    """
    try:
        # Always uses the shared default analyzer (no analysis_type override).
        analyzer = get_analyzer()
        
        logger.info(f"Assessing text quality for text ({len(text)} chars)")
        
        # Analyze text structure
        text_results = analyzer.text_analyzer.analyze(text)
        
        # Extract key metrics; sections may be absent, hence .get().
        basic_stats = text_results.get('basic_statistics', {})
        complexity = text_results.get('complexity_metrics', {})
        
        result_text = f"""# Text Quality Assessment

## Basic Statistics
- Characters: {basic_stats.get('total_characters', 0):,}
- Words: {basic_stats.get('total_words', 0):,}
- Sentences: {basic_stats.get('total_sentences', 0):,}
- Average sentence length: {basic_stats.get('avg_sentence_length', 0):.1f} words
- Vocabulary size: {basic_stats.get('vocabulary_size', 0):,}
- Lexical diversity: {basic_stats.get('lexical_diversity', 0):.4f}

## Complexity Analysis
"""
        
        complexity_score = complexity.get('complexity_score', 0)
        result_text += f"- Complexity score: {complexity_score:.4f}\n"
        
        # Bucket the score into three human-readable difficulty bands.
        if complexity_score > 0.7:
            result_text += "- Level: HIGH complexity\n"
            result_text += "- Reading difficulty: Advanced\n"
        elif complexity_score > 0.4:
            result_text += "- Level: MODERATE complexity\n"
            result_text += "- Reading difficulty: Intermediate\n"
        else:
            result_text += "- Level: LOW complexity\n"
            result_text += "- Reading difficulty: Basic\n"
        
        # Readability metrics (Flesch bands: higher score = easier to read)
        readability = complexity.get('readability_metrics', {})
        if readability:
            flesch_score = readability.get('flesch_reading_ease', 0)
            result_text += f"""
## Readability Metrics
- Flesch Reading Ease: {flesch_score:.1f}
- Grade level: """
            
            if flesch_score >= 70:
                result_text += "7th grade (Easy)\n"
            elif flesch_score >= 50:
                result_text += "High school (Moderate)\n"
            elif flesch_score >= 30:
                result_text += "College (Difficult)\n"
            else:
                result_text += "Graduate (Very difficult)\n"
        
        # Recommendations
        if include_recommendations:
            result_text += "\n## Recommendations\n"
            
            recommendations = []
            
            if complexity_score > 0.8:
                recommendations.append("Consider simplifying language and sentence structure")
            
            if basic_stats.get('avg_sentence_length', 0) > 25:
                recommendations.append("Break down long sentences for better readability")
            
            if basic_stats.get('lexical_diversity', 0) < 0.3:
                recommendations.append("Increase vocabulary variety to avoid repetition")
            
            # Default of 100 here means "no data" never triggers this advice.
            if readability.get('flesch_reading_ease', 100) < 30:
                recommendations.append("Improve readability by using simpler words and shorter sentences")
            
            if not recommendations:
                recommendations.append("Text quality is good - continue current writing style")
            
            for i, rec in enumerate(recommendations, 1):
                result_text += f"{i}. {rec}\n"
        
        return result_text
        
    except Exception as e:
        # Broad catch is deliberate: MCP tools return errors as text.
        logger.error(f"Text quality assessment failed: {str(e)}")
        return f"Text quality assessment failed: {str(e)}"


@mcp.tool
def get_analysis_config(config_type: str = "all") -> str:
    """Get available analysis configurations and their descriptions
    
    Args:
        config_type: Which configuration to describe ("all", "computational_log", "academic_paper", "social_text")
        
    Returns:
        Configuration information and descriptions as markdown, or an error
        message for unknown configuration types
    """
    try:
        # Static catalog of the supported analyzer configurations.
        config_info = {
            "computational_log": {
                "description": "Optimized for numerical computation logs and solver output",
                "features": [
                    "Enhanced convergence analysis",
                    "Scientific notation detection",
                    "Residual norm tracking",
                    "Lower thresholds for large numbers",
                    "Shorter minimum sequence lengths",
                ],
                "use_cases": [
                    "HIP/ROCm debug output",
                    "Iterative solver logs",
                    "Performance monitoring",
                    "Numerical simulation results",
                ],
            },
            "academic_paper": {
                "description": "Optimized for academic and research papers",
                "features": [
                    "Enhanced readability analysis",
                    "Vocabulary assessment",
                    "Longer sentence optimization",
                    "Minimal numerical focus",
                    "Citation pattern detection",
                ],
                "use_cases": [
                    "Research papers",
                    "Technical documentation",
                    "Educational content",
                    "Literature analysis",
                ],
            },
            "social_text": {
                "description": "Optimized for social media and informal text",
                "features": [
                    "Short sentence optimization",
                    "Informal language patterns",
                    "Emoji handling",
                    "Minimal anomaly detection",
                    "Higher numerical thresholds",
                ],
                "use_cases": [
                    "Social media posts",
                    "Chat conversations",
                    "Informal communications",
                    "User-generated content",
                ],
            },
        }

        def bullets(items):
            # Render a list of strings as markdown bullet lines.
            return "".join(f"- {item}\n" for item in items)

        if config_type == "all":
            # One section per known configuration.
            parts = ["# Analysis Configuration Types\n\n"]
            for name, info in config_info.items():
                parts.append(
                    f"## {name.title().replace('_', ' ')}\n"
                    f"**Description:** {info['description']}\n\n"
                    "**Key Features:**\n"
                )
                parts.append(bullets(info['features']))
                parts.append("\n**Use Cases:**\n")
                parts.append(bullets(info['use_cases']))
                parts.append("\n")
            return "".join(parts)

        if config_type in config_info:
            # Single-configuration view with full detail.
            info = config_info[config_type]
            parts = [
                f"# {config_type.title().replace('_', ' ')} Configuration\n\n"
                f"**Description:** {info['description']}\n\n"
                "## Key Features\n",
                bullets(info['features']),
                "\n## Use Cases\n",
                bullets(info['use_cases']),
            ]
            return "".join(parts)

        return f"Error: Unknown configuration type '{config_type}'. Available types: computational_log, academic_paper, social_text"

    except Exception as e:
        # Broad catch is deliberate: MCP tools return errors as text.
        logger.error(f"Get analysis config failed: {str(e)}")
        return f"Get analysis config failed: {str(e)}"


def _format_health_status(health_score: float) -> str:
    """Format health status with emoji and description"""
    
    if health_score >= 0.9:
        return "🟢 EXCELLENT - Minimal anomalies, high data quality"
    elif health_score >= 0.7:
        return "🟡 GOOD - Some anomalies present but manageable"
    elif health_score >= 0.5:
        return "🟠 MODERATE - Notable anomalies requiring attention"
    else:
        return "🔴 POOR - Significant numerical problems detected"


def main():
    """Parse CLI arguments, initialize the analyzer, and start the MCP server."""
    arg_parser = argparse.ArgumentParser(description="Structural Text Analyzer MCP Server")
    arg_parser.add_argument("--transport", choices=["stdio", "sse"], default="stdio",
                            help="Transport method for MCP communication")
    cli_args = arg_parser.parse_args()

    # The analyzer must exist before the first tool invocation arrives;
    # a failed initialization is fatal for the server.
    try:
        initialize_analyzer()
        logger.info("MCP server initialization completed")
    except Exception as e:
        logger.error(f"Failed to initialize MCP server: {e}")
        sys.exit(1)

    if cli_args.transport != "stdio":
        # "sse" passes argparse validation but is not wired up here.
        logger.error(f"Unsupported transport method: {cli_args.transport}")
        sys.exit(1)

    # STDIO mode for Claude Code integration.
    mcp.run()


if __name__ == "__main__":
    # Script entry point: `python <this file> [--transport stdio]`.
    main()
