#!/usr/bin/env python3
"""
MCP Server for Structural Text Analyzer

This module implements a Model Context Protocol (MCP) server that exposes
the structural text analysis capabilities as tools that can be used by
AI agents and applications.
"""

import asyncio
import json
import sys
import tempfile
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

# MCP imports
try:
    from mcp.server import Server, NotificationOptions
    from mcp.server.models import InitializationOptions
    import mcp.server.stdio
    import mcp.types as types
    HAS_MCP = True
except ImportError:
    HAS_MCP = False
    print("Warning: MCP library not found. Install with: pip install mcp")

try:
    from .analyzer import StructuralTextAnalyzer
    from .config import ConfigManager
except ImportError:
    # Handle case when running as standalone script
    from analyzer import StructuralTextAnalyzer
    from config import ConfigManager

class StructuralAnalyzerMCPServer:
    """MCP Server for Structural Text Analysis"""
    
    def __init__(self):
        """Create the underlying MCP server and register all analysis tools.

        Raises:
            ImportError: if the ``mcp`` package is not installed.
        """
        if HAS_MCP:
            self.server = Server("structural-text-analyzer")
            self.setup_tools()
        else:
            raise ImportError("MCP library required. Install with: pip install mcp")
        
    def setup_tools(self):
        """Register all MCP tools.

        Installs two handlers on the wrapped MCP ``Server``:

        * ``handle_list_tools`` -- advertises the six analysis tools together
          with their JSON-schema input contracts.
        * ``handle_call_tool`` -- dispatches an incoming tool call to the
          matching ``self._<tool_name>`` coroutine.
        """
        
        @self.server.list_tools()
        async def handle_list_tools() -> list[types.Tool]:
            """Return the static catalogue of tools this server exposes."""
            return [
                # Analyze raw text passed inline in the request.
                types.Tool(
                    name="analyze_text_content",
                    description="Analyze text content directly for structural patterns, numerical anomalies, and trends",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "text": {
                                "type": "string",
                                "description": "Text content to analyze"
                            },
                            "analysis_type": {
                                "type": "string",
                                "enum": ["default", "computational_log", "academic_paper", "social_text"],
                                "default": "default",
                                "description": "Type of text for optimized analysis"
                            },
                            "output_format": {
                                "type": "string",
                                "enum": ["summary", "detailed", "json"],
                                "default": "summary",
                                "description": "Level of detail in output"
                            }
                        },
                        "required": ["text"]
                    }
                ),
                
                # Same analysis, but reading the input from a file path.
                types.Tool(
                    name="analyze_text_file",
                    description="Analyze a text file for structural patterns, numerical anomalies, and trends",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "file_path": {
                                "type": "string",
                                "description": "Path to the text file to analyze"
                            },
                            "analysis_type": {
                                "type": "string",
                                "enum": ["default", "computational_log", "academic_paper", "social_text"],
                                "default": "default",
                                "description": "Type of text for optimized analysis"
                            },
                            "output_format": {
                                "type": "string",
                                "enum": ["summary", "detailed", "json"],
                                "default": "summary",
                                "description": "Level of detail in output"
                            }
                        },
                        "required": ["file_path"]
                    }
                ),
                
                # Numeric extraction with optional anomaly (NaN/Inf) detection.
                types.Tool(
                    name="extract_numerical_patterns",
                    description="Extract and analyze numerical patterns from text",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "text": {
                                "type": "string",
                                "description": "Text content to analyze for numerical patterns"
                            },
                            "detect_anomalies": {
                                "type": "boolean",
                                "default": True,
                                "description": "Whether to detect numerical anomalies (NaN, Inf, etc.)"
                            }
                        },
                        "required": ["text"]
                    }
                ),
                
                # Convergence/divergence analysis over numeric sequences.
                types.Tool(
                    name="analyze_convergence_trends",
                    description="Analyze convergence and trend patterns in numerical sequences",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "text": {
                                "type": "string",
                                "description": "Text containing numerical sequences to analyze"
                            },
                            "sequence_patterns": {
                                "type": "array",
                                "items": {"type": "string"},
                                "description": "Optional regex patterns to extract specific sequences"
                            }
                        },
                        "required": ["text"]
                    }
                ),
                
                # Readability / complexity scoring with optional suggestions.
                types.Tool(
                    name="assess_text_quality",
                    description="Assess text quality including readability, complexity, and structure",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "text": {
                                "type": "string",
                                "description": "Text content to assess"
                            },
                            "include_recommendations": {
                                "type": "boolean",
                                "default": True,
                                "description": "Whether to include improvement recommendations"
                            }
                        },
                        "required": ["text"]
                    }
                ),
                
                # Introspection: describe the tuned analyzer presets.
                types.Tool(
                    name="get_analysis_config",
                    description="Get available analysis configurations and their descriptions",
                    inputSchema={
                        "type": "object",
                        "properties": {
                            "config_type": {
                                "type": "string",
                                "enum": ["all", "computational_log", "academic_paper", "social_text"],
                                "default": "all",
                                "description": "Which configuration to describe"
                            }
                        }
                    }
                )
            ]
        
        @self.server.call_tool()
        async def handle_call_tool(name: str, arguments: dict) -> list[types.TextContent]:
            """Dispatch a tool call to the matching private handler method."""
            
            try:
                if name == "analyze_text_content":
                    return await self._analyze_text_content(arguments)
                elif name == "analyze_text_file":
                    return await self._analyze_text_file(arguments)
                elif name == "extract_numerical_patterns":
                    return await self._extract_numerical_patterns(arguments)
                elif name == "analyze_convergence_trends":
                    return await self._analyze_convergence_trends(arguments)
                elif name == "assess_text_quality":
                    return await self._assess_text_quality(arguments)
                elif name == "get_analysis_config":
                    return await self._get_analysis_config(arguments)
                else:
                    raise ValueError(f"Unknown tool: {name}")
                    
            except Exception as e:
                # Broad catch is deliberate: tool failures are reported back
                # to the MCP client as text instead of crashing the server.
                return [types.TextContent(
                    type="text",
                    text=f"Error executing tool '{name}': {str(e)}"
                )]
    
    async def _analyze_text_content(self, args: Dict[str, Any]) -> List[types.TextContent]:
        """Run structural analysis on raw text supplied in the tool arguments.

        Args:
            args: Tool arguments; requires "text", and optionally accepts
                "analysis_type" and "output_format".

        Returns:
            Single-element list of TextContent holding the formatted result.
        """
        content = args["text"]
        text_kind = args.get("analysis_type", "default")
        fmt = args.get("output_format", "summary")

        # Pick the analyzer preset matching the declared text type.
        analyzer = StructuralTextAnalyzer.from_text_type(text_kind)
        analysis = analyzer.analyze_text(content, "mcp_analysis")

        if fmt == "json":
            # Machine-readable dump of the full result structure.
            payload = json.dumps(analysis, indent=2, default=str)
            return [types.TextContent(type="text", text=payload)]

        if fmt == "detailed":
            # The analyzer writes its report to disk; use a throwaway dir
            # so nothing is left behind after the call.
            with tempfile.TemporaryDirectory() as scratch:
                analyzer.output_dir = scratch
                report_file = analyzer.generate_report()
                with open(report_file, 'r', encoding='utf-8') as handle:
                    full_report = handle.read()
            return [types.TextContent(type="text", text=full_report)]

        # Default: compact human-readable summary.
        summary = analyzer.get_summary()
        summary_text = f"""# Text Analysis Summary

## File Information
- Processing time: {summary['file_info']['processing_time']:.3f} seconds
- Size: {summary['file_info']['size_bytes']:,} bytes

## Text Metrics
- Characters: {summary['text_metrics']['characters']:,}
- Words: {summary['text_metrics']['words']:,}
- Sentences: {summary['text_metrics']['sentences']:,}
- Vocabulary: {summary['text_metrics']['vocabulary_size']:,} unique words
- Lexical diversity: {summary['text_metrics']['lexical_diversity']:.4f}

## Numerical Analysis
- Numbers found: {summary['numerical_metrics']['total_numbers']:,}
- Valid numbers: {summary['numerical_metrics']['valid_numbers']:,}
- Health score: {summary['numerical_metrics']['health_score']:.3f}/1.000

## Trend Analysis
- Sequences analyzed: {summary['trend_metrics']['sequences_analyzed']}
- Convergent sequences: {summary['trend_metrics']['convergent_sequences']}
- Divergent sequences: {summary['trend_metrics']['divergent_sequences']}

## Health Assessment
{self._format_health_status(summary['numerical_metrics']['health_score'])}
"""

        return [types.TextContent(type="text", text=summary_text)]
    
    async def _analyze_text_file(self, args: Dict[str, Any]) -> List[types.TextContent]:
        """Run structural analysis on a text file on disk.

        Args:
            args: Tool arguments; requires "file_path", and optionally
                accepts "analysis_type" and "output_format".

        Returns:
            Single-element list of TextContent holding the formatted result
            (or an error message when the file does not exist).
        """
        file_path = args["file_path"]
        text_kind = args.get("analysis_type", "default")
        fmt = args.get("output_format", "summary")

        # Return a readable error rather than raising on a missing file.
        if not os.path.exists(file_path):
            return [types.TextContent(
                type="text",
                text=f"Error: File not found: {file_path}"
            )]

        # Pick the analyzer preset matching the declared text type.
        analyzer = StructuralTextAnalyzer.from_text_type(text_kind)
        analysis = analyzer.analyze_file(file_path, "mcp_file_analysis")

        if fmt == "json":
            # Machine-readable dump of the full result structure.
            payload = json.dumps(analysis, indent=2, default=str)
            return [types.TextContent(type="text", text=payload)]

        if fmt == "detailed":
            # The analyzer writes its report to disk; use a throwaway dir.
            with tempfile.TemporaryDirectory() as scratch:
                analyzer.output_dir = scratch
                report_file = analyzer.generate_report()
                with open(report_file, 'r', encoding='utf-8') as handle:
                    full_report = handle.read()
            return [types.TextContent(type="text", text=full_report)]

        # Default: compact human-readable summary.
        summary = analyzer.get_summary()
        summary_text = f"""# File Analysis Summary: {os.path.basename(file_path)}

## File Information
- Filename: {summary['file_info']['filename']}
- Size: {summary['file_info']['size_bytes']:,} bytes
- Processing time: {summary['file_info']['processing_time']:.3f} seconds

## Text Metrics
- Characters: {summary['text_metrics']['characters']:,}
- Words: {summary['text_metrics']['words']:,}
- Sentences: {summary['text_metrics']['sentences']:,}
- Vocabulary: {summary['text_metrics']['vocabulary_size']:,} unique words
- Lexical diversity: {summary['text_metrics']['lexical_diversity']:.4f}

## Numerical Analysis
- Numbers found: {summary['numerical_metrics']['total_numbers']:,}
- Valid numbers: {summary['numerical_metrics']['valid_numbers']:,}
- Health score: {summary['numerical_metrics']['health_score']:.3f}/1.000

## Trend Analysis
- Sequences analyzed: {summary['trend_metrics']['sequences_analyzed']}
- Convergent sequences: {summary['trend_metrics']['convergent_sequences']}
- Divergent sequences: {summary['trend_metrics']['divergent_sequences']}

## Health Assessment
{self._format_health_status(summary['numerical_metrics']['health_score'])}
"""

        return [types.TextContent(type="text", text=summary_text)]
    
    async def _extract_numerical_patterns(self, args: Dict[str, Any]) -> List[types.TextContent]:
        """Extract and summarize numerical patterns found in the given text.

        Args:
            args: Tool arguments; requires "text", optional boolean
                "detect_anomalies" (default True).

        Returns:
            Single-element list of TextContent with a markdown report.
        """
        text = args["text"]
        detect_anomalies = args.get("detect_anomalies", True)

        # Default analyzer configuration suffices for raw number extraction.
        analyzer = StructuralTextAnalyzer()

        # Extract numerical patterns
        num_results = analyzer.numerical_analyzer.analyze(text)

        summary = num_results.get('summary', {})
        distribution = num_results.get('distribution', {})
        anomalies = num_results.get('anomalies', {}) if detect_anomalies else {}

        result_text = f"""# Numerical Pattern Analysis

## Numbers Detected
- Total numbers found: {summary.get('total_numbers_found', 0):,}
- Valid numbers: {summary.get('valid_numbers', 0):,}
- Invalid numbers: {summary.get('invalid_numbers', 0):,}
- Scientific notation: {summary.get('scientific_notation_count', 0):,}

## Statistical Distribution
"""

        if distribution:
            result_text += f"""- Mean: {distribution.get('mean', 0):.6e}
- Median: {distribution.get('median', 0):.6e}
- Standard deviation: {distribution.get('std', 0):.6e}
- Range: [{distribution.get('min', 0):.6e}, {distribution.get('max', 0):.6e}]
- Positive numbers: {distribution.get('positives_count', 0):,}
- Negative numbers: {distribution.get('negatives_count', 0):,}
- Zero values: {distribution.get('zeros_count', 0):,}
"""

        if detect_anomalies and anomalies:
            anomaly_summary = anomalies.get('summary', {})
            anomaly_types = anomaly_summary.get('anomaly_types', {})

            result_text += f"""
## Anomaly Detection
- Total anomalies: {anomaly_summary.get('total_anomalies', 0):,}
- Anomaly rate: {anomaly_summary.get('anomaly_rate', 0)*100:.2f}%

### Anomaly Breakdown:
"""
            for anomaly_type, count in anomaly_types.items():
                if count > 0:
                    # BUG FIX: this line previously appended "\\n" (a literal
                    # backslash + n) instead of a newline, corrupting the
                    # markdown output.
                    result_text += f"- {anomaly_type.replace('_', ' ').title()}: {count:,}\n"

            health_score = num_results.get('health_score', 0)
            result_text += f"""
## Health Assessment
- Health score: {health_score:.3f}/1.000
- Status: {self._format_health_status(health_score)}
"""

        return [types.TextContent(type="text", text=result_text)]
    
    async def _analyze_convergence_trends(self, args: Dict[str, Any]) -> List[types.TextContent]:
        """Analyze convergence and trend patterns in numerical sequences.

        Args:
            args: Tool arguments; requires "text", optional list of regex
                strings "sequence_patterns" to target specific sequences.

        Returns:
            Single-element list of TextContent with a markdown report, or a
            notice when no numerical sequences were found.
        """
        text = args["text"]
        sequence_patterns = args.get("sequence_patterns")

        analyzer = StructuralTextAnalyzer()

        # Custom regex patterns, when supplied, narrow which sequences are
        # extracted; otherwise the analyzer's defaults apply.
        if sequence_patterns:
            sequences = analyzer.numerical_analyzer.extract_sequences(text, sequence_patterns)
        else:
            sequences = analyzer.numerical_analyzer.extract_sequences(text)

        if not sequences:
            return [types.TextContent(
                type="text",
                text="No numerical sequences found for trend analysis."
            )]

        # Analyze trends
        trend_results = analyzer.trend_analyzer.analyze_multiple_sequences(sequences)

        result_text = f"""# Convergence and Trend Analysis

## Sequences Found: {len(sequences)}

"""

        for seq_name, seq_analysis in trend_results.items():
            # 'comparative_analysis' is a cross-sequence summary, handled below.
            if seq_name == 'comparative_analysis':
                continue

            if isinstance(seq_analysis, dict):
                trend_type = seq_analysis.get('trend_type', 'unknown')
                data_points = seq_analysis.get('data_points', 0)

                result_text += f"""### Sequence: {seq_name}
- Data points: {data_points}
- Trend type: {trend_type.upper()}
"""

                # Add convergence info if available.
                convergence = seq_analysis.get('convergence_analysis', {})
                if convergence:
                    conv_rate = convergence.get('convergence_rate', 'unknown')
                    conv_ratio = convergence.get('convergence_ratio', 'N/A')

                    # BUG FIX: these "+=" lines previously appended "\\n"
                    # (a literal backslash + n) instead of real newlines.
                    result_text += f"- Convergence rate: {conv_rate.upper()}\n"
                    if isinstance(conv_ratio, (int, float)):
                        result_text += f"- Convergence ratio: {conv_ratio:.6f}\n"

                # Add trend metrics if available.
                trend_metrics = seq_analysis.get('trend_metrics', {})
                if trend_metrics:
                    result_text += f"- Confidence: {trend_metrics.get('confidence_score', 0):.3f}\n"
                    result_text += f"- Stability: {trend_metrics.get('stability_score', 0):.3f}\n"

                result_text += "\n"

        # Append comparative analysis if the analyzer produced one.
        if 'comparative_analysis' in trend_results:
            comp = trend_results['comparative_analysis']
            result_text += f"""## Comparative Analysis
- Total sequences: {comp.get('sequence_count', 0)}
- Average length: {comp.get('length_statistics', {}).get('mean_length', 0):.1f}
"""

            conv_comp = comp.get('convergence_comparison', {})
            if conv_comp:
                result_text += f"- Best convergence ratio: {conv_comp.get('best_convergence', 0):.6f}\n"
                result_text += f"- Mean convergence ratio: {conv_comp.get('mean_convergence_ratio', 0):.6f}\n"

        return [types.TextContent(type="text", text=result_text)]
    
    async def _assess_text_quality(self, args: Dict[str, Any]) -> List[types.TextContent]:
        """Assess text quality: readability, complexity, and structure.

        Args:
            args: Tool arguments; requires "text", optional boolean
                "include_recommendations" (default True).

        Returns:
            Single-element list of TextContent with a markdown report.
        """
        text = args["text"]
        include_recommendations = args.get("include_recommendations", True)

        analyzer = StructuralTextAnalyzer()

        # Analyze text structure
        text_results = analyzer.text_analyzer.analyze(text)

        basic_stats = text_results.get('basic_statistics', {})
        complexity = text_results.get('complexity_metrics', {})

        result_text = f"""# Text Quality Assessment

## Basic Statistics
- Characters: {basic_stats.get('total_characters', 0):,}
- Words: {basic_stats.get('total_words', 0):,}
- Sentences: {basic_stats.get('total_sentences', 0):,}
- Average sentence length: {basic_stats.get('avg_sentence_length', 0):.1f} words
- Vocabulary size: {basic_stats.get('vocabulary_size', 0):,}
- Lexical diversity: {basic_stats.get('lexical_diversity', 0):.4f}

## Complexity Analysis
"""

        complexity_score = complexity.get('complexity_score', 0)
        # BUG FIX: all "+=" lines below previously appended "\\n" (a literal
        # backslash + n) instead of real newlines, corrupting the markdown.
        result_text += f"- Complexity score: {complexity_score:.4f}\n"

        if complexity_score > 0.7:
            result_text += "- Level: HIGH complexity\n"
            result_text += "- Reading difficulty: Advanced\n"
        elif complexity_score > 0.4:
            result_text += "- Level: MODERATE complexity\n"
            result_text += "- Reading difficulty: Intermediate\n"
        else:
            result_text += "- Level: LOW complexity\n"
            result_text += "- Reading difficulty: Basic\n"

        # Readability metrics (only present for some analyzer configurations).
        readability = complexity.get('readability_metrics', {})
        if readability:
            flesch_score = readability.get('flesch_reading_ease', 0)
            result_text += f"""
## Readability Metrics
- Flesch Reading Ease: {flesch_score:.1f}
- Grade level: """

            # Flesch Reading Ease bands, easiest (highest score) first.
            if flesch_score >= 70:
                result_text += "7th grade (Easy)\n"
            elif flesch_score >= 50:
                result_text += "High school (Moderate)\n"
            elif flesch_score >= 30:
                result_text += "College (Difficult)\n"
            else:
                result_text += "Graduate (Very difficult)\n"

        # Optional improvement suggestions derived from the metrics above.
        if include_recommendations:
            result_text += "\n## Recommendations\n"

            recommendations = []

            if complexity_score > 0.8:
                recommendations.append("Consider simplifying language and sentence structure")

            if basic_stats.get('avg_sentence_length', 0) > 25:
                recommendations.append("Break down long sentences for better readability")

            if basic_stats.get('lexical_diversity', 0) < 0.3:
                recommendations.append("Increase vocabulary variety to avoid repetition")

            if readability.get('flesch_reading_ease', 100) < 30:
                recommendations.append("Improve readability by using simpler words and shorter sentences")

            if not recommendations:
                recommendations.append("Text quality is good - continue current writing style")

            for i, rec in enumerate(recommendations, 1):
                result_text += f"{i}. {rec}\n"

        return [types.TextContent(type="text", text=result_text)]
    
    async def _get_analysis_config(self, args: Dict[str, Any]) -> List[types.TextContent]:
        """Describe the available analysis configurations.

        Args:
            args: Tool arguments; optional "config_type" selects a single
                configuration; "all" (the default) describes every one.

        Returns:
            Single-element list of TextContent with a markdown description,
            or an error message for an unknown configuration type.
        """
        config_type = args.get("config_type", "all")

        # Static catalogue of the tuned analyzer presets.
        config_info = {
            "computational_log": {
                "description": "Optimized for numerical computation logs and solver output",
                "features": [
                    "Enhanced convergence analysis",
                    "Scientific notation detection",
                    "Residual norm tracking",
                    "Lower thresholds for large numbers",
                    "Shorter minimum sequence lengths"
                ],
                "use_cases": [
                    "HIP/ROCm debug output",
                    "Iterative solver logs",
                    "Performance monitoring",
                    "Numerical simulation results"
                ]
            },
            "academic_paper": {
                "description": "Optimized for academic and research papers",
                "features": [
                    "Enhanced readability analysis",
                    "Vocabulary assessment",
                    "Longer sentence optimization",
                    "Minimal numerical focus",
                    "Citation pattern detection"
                ],
                "use_cases": [
                    "Research papers",
                    "Technical documentation",
                    "Educational content",
                    "Literature analysis"
                ]
            },
            "social_text": {
                "description": "Optimized for social media and informal text",
                "features": [
                    "Short sentence optimization",
                    "Informal language patterns",
                    "Emoji handling",
                    "Minimal anomaly detection",
                    "Higher numerical thresholds"
                ],
                "use_cases": [
                    "Social media posts",
                    "Chat conversations",
                    "Informal communications",
                    "User-generated content"
                ]
            }
        }
        
        if config_type == "all":
            # BUG FIX: the headers/bullets in this method previously used
            # "\\n" (a literal backslash + n) instead of real newlines,
            # corrupting the markdown output.
            result_text = "# Analysis Configuration Types\n\n"

            for name, info in config_info.items():
                result_text += f"""## {name.title().replace('_', ' ')}
**Description:** {info['description']}

**Key Features:**
"""
                for feature in info['features']:
                    result_text += f"- {feature}\n"

                result_text += "\n**Use Cases:**\n"
                for use_case in info['use_cases']:
                    result_text += f"- {use_case}\n"

                result_text += "\n"

        elif config_type in config_info:
            info = config_info[config_type]
            result_text = f"""# {config_type.title().replace('_', ' ')} Configuration

**Description:** {info['description']}

## Key Features
"""
            for feature in info['features']:
                result_text += f"- {feature}\n"

            result_text += "\n## Use Cases\n"
            for use_case in info['use_cases']:
                result_text += f"- {use_case}\n"

        else:
            result_text = f"Error: Unknown configuration type '{config_type}'. Available types: computational_log, academic_paper, social_text"

        return [types.TextContent(type="text", text=result_text)]
    
    def _format_health_status(self, health_score: float) -> str:
        """Format health status with emoji and description"""
        
        if health_score >= 0.9:
            return "🟢 EXCELLENT - Minimal anomalies, high data quality"
        elif health_score >= 0.7:
            return "🟡 GOOD - Some anomalies present but manageable"
        elif health_score >= 0.5:
            return "🟠 MODERATE - Notable anomalies requiring attention"
        else:
            return "🔴 POOR - Significant numerical problems detected"

async def run_mcp_server():
    """Entry point: serve the structural analyzer over MCP stdio transport."""

    # Without the mcp package there is nothing to run; report and bail out.
    if not HAS_MCP:
        print("Error: MCP library not found. Install with: pip install mcp")
        return

    analyzer_server = StructuralAnalyzerMCPServer()

    async with mcp.server.stdio.stdio_server() as (reader, writer):
        init_options = InitializationOptions(
            server_name="structural-text-analyzer",
            server_version="1.0.0",
            capabilities=analyzer_server.server.get_capabilities(
                notification_options=NotificationOptions(),
                experimental_capabilities={}
            )
        )
        await analyzer_server.server.run(reader, writer, init_options)

if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "--mcp-server":
        # Serve tools over stdio until the client disconnects.
        asyncio.run(run_mcp_server())
    else:
        # No flag given: print usage help instead of starting the server.
        print("Structural Text Analyzer MCP Server")
        print("Usage: python mcp_server.py --mcp-server")
        print()
        print("Available tools:")
        for tool_line in (
            "- analyze_text_content: Analyze text content directly",
            "- analyze_text_file: Analyze a text file",
            "- extract_numerical_patterns: Extract numerical patterns",
            "- analyze_convergence_trends: Analyze trend patterns",
            "- assess_text_quality: Assess text quality and readability",
            "- get_analysis_config: Get configuration information",
        ):
            print(tool_line)
