"""Standalone Structural Text Analyzer MCP Server - Simplified Version"""

import argparse
import asyncio
import json
import logging
import math
import os
import re
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional

# Probe for FastMCP without crashing at import time; the launcher (run.sh)
# is expected to install it and re-run this script if it is missing.
try:
    from fastmcp import FastMCP
    HAS_FASTMCP = True
except ImportError:
    HAS_FASTMCP = False
    print("FastMCP not found - will be installed automatically", file=sys.stderr)

# Configure logging once for the whole process; tools log through this.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Only proceed if FastMCP is available (it will be installed by run.sh).
# Exit code 1 tells the launcher to retry after installing dependencies.
if not HAS_FASTMCP:
    print("Waiting for dependencies to be installed...", file=sys.stderr)
    sys.exit(1)

# Create the MCP server instance; tools below register onto it via @mcp.tool.
mcp = FastMCP("Structural Text Analyzer Server")

# Simple numerical pattern extraction function
def extract_numbers_from_text(text: str) -> List[float]:
    """Extract every numeric token in *text* as a float.

    Recognizes integers, decimals, and scientific notation (e.g. ``1.5e-3``),
    with an optional leading minus sign.
    """
    # Sign, integer part, optional fraction, optional exponent.
    numeric_token = re.compile(r'-?\d+\.?\d*(?:[eE][+-]?\d+)?')

    values: List[float] = []
    for match in numeric_token.finditer(text):
        try:
            values.append(float(match.group(0)))
        except ValueError:
            # Defensive only: the pattern guarantees float-parseable tokens.
            pass

    return values

def analyze_basic_trends(numbers: List[float]) -> Dict[str, Any]:
    """Classify the overall trend of an ordered numerical sequence.

    Args:
        numbers: Ordered sequence of values (e.g. extracted from a log).

    Returns:
        Dict with ``trend`` ("stable", "convergent", "decreasing" or
        "increasing") plus ``data_points``, ``first_value``, ``last_value``,
        ``change`` and ``avg_change``.  With fewer than two values, returns
        only ``trend`` ("insufficient_data") and a ``message``.
    """
    if len(numbers) < 2:
        return {"trend": "insufficient_data", "message": "Need at least 2 numbers"}

    # Step-to-step differences and their mean.
    diffs = [b - a for a, b in zip(numbers, numbers[1:])]
    avg_change = sum(diffs) / len(diffs)

    # "Stable" means the average step is below 1% of the first value's
    # magnitude (absolute threshold 0.01 when the first value is zero).
    # BUG FIX: the original conditional-expression form parsed as
    # `(abs(avg_change) < ...) if numbers[0] != 0 else 0.01`, so any
    # sequence starting at 0 was always classified "stable" (0.01 is truthy).
    stable_threshold = 0.01 * abs(numbers[0]) if numbers[0] != 0 else 0.01

    if abs(avg_change) < stable_threshold:
        trend = "stable"
    elif avg_change < 0:
        # Decreasing on average; call it "convergent" when the last three
        # steps are all negative (monotone tail).  Sequences too short to
        # have three diffs are treated as convergent, matching the original.
        tail_monotone = all(d < 0 for d in diffs[-3:]) if len(diffs) >= 3 else True
        trend = "convergent" if tail_monotone else "decreasing"
    else:
        trend = "increasing"

    return {
        "trend": trend,
        "data_points": len(numbers),
        "first_value": numbers[0],
        "last_value": numbers[-1],
        "change": numbers[-1] - numbers[0],
        "avg_change": avg_change
    }

def detect_anomalies(numbers: List[float]) -> Dict[str, Any]:
    """Count pathological values in *numbers*.

    Flags NaN, +/-infinity, very large magnitudes (> 1e10), and very small
    non-zero magnitudes (< 1e-10).

    Args:
        numbers: Values to scan (typically from extract_numbers_from_text).

    Returns:
        Dict with ``total_anomalies``, per-type counts in ``anomaly_types``,
        and ``anomaly_rate`` (anomalies / len(numbers); 0 for empty input).
    """
    if not numbers:
        # Include anomaly_rate here too, so callers can rely on the key
        # being present regardless of input (the original omitted it).
        return {"total_anomalies": 0, "anomaly_types": {}, "anomaly_rate": 0}

    anomalies = {
        "nan": 0,
        "inf": 0,
        "large_numbers": 0,
        "small_numbers": 0
    }

    for num in numbers:
        # math.isnan/isinf instead of the fragile str(num) comparison, which
        # depended on float repr details.
        if math.isnan(num):
            anomalies["nan"] += 1
        elif math.isinf(num):
            anomalies["inf"] += 1
        elif abs(num) > 1e10:
            anomalies["large_numbers"] += 1
        elif abs(num) < 1e-10 and num != 0:
            anomalies["small_numbers"] += 1

    total = sum(anomalies.values())
    return {
        "total_anomalies": total,
        "anomaly_types": anomalies,
        "anomaly_rate": total / len(numbers)
    }

@mcp.tool
def analyze_text_content(text: str, analysis_type: str = "default") -> str:
    """Analyze text content for numerical patterns and trends

    Args:
        text: Text content to analyze
        analysis_type: Type of analysis (default, computational_log, etc.)

    Returns:
        Markdown-formatted analysis summary (or an error message string).
    """
    try:
        logger.info(f"Analyzing text content ({len(text)} chars)")

        # Basic text statistics.
        char_count = len(text)
        word_count = len(text.split())
        line_count = len(text.splitlines())

        # Numerical extraction, trend classification, anomaly scan.
        numbers = extract_numbers_from_text(text)
        trend_analysis = analyze_basic_trends(numbers) if numbers else {}
        anomaly_analysis = detect_anomalies(numbers)

        # BUG FIX: detect_anomalies may omit 'anomaly_rate' for an empty
        # input list, which used to raise KeyError and turn any number-free
        # text into "Text analysis failed: 'anomaly_rate'".  Default to 0.
        anomaly_rate = anomaly_analysis.get("anomaly_rate", 0)

        # Health score: 1.0 minus half the anomaly rate, clamped to [0, 1].
        health_score = 1.0 - (anomaly_rate * 0.5)
        health_score = max(0.0, min(1.0, health_score))

        # Format results
        result = f"""# Text Analysis Summary

## Basic Statistics
- Characters: {char_count:,}
- Words: {word_count:,}
- Lines: {line_count:,}

## Numerical Analysis
- Numbers found: {len(numbers):,}
- Health score: {health_score:.3f}/1.000

## Anomaly Detection
- Total anomalies: {anomaly_analysis['total_anomalies']:,}
- Anomaly rate: {anomaly_rate*100:.2f}%
"""

        if anomaly_analysis['total_anomalies'] > 0:
            result += "\n### Anomaly Breakdown:\n"
            for anom_type, count in anomaly_analysis['anomaly_types'].items():
                if count > 0:
                    result += f"- {anom_type.replace('_', ' ').title()}: {count}\n"

        # BUG FIX: exactly one extracted number yields a truthy
        # {"trend": "insufficient_data", "message": ...} dict that lacks
        # 'data_points'; rendering it used to raise KeyError.  Only render
        # the section when a full trend result is available.
        if trend_analysis and "data_points" in trend_analysis:
            result += f"""
## Trend Analysis
- Data points: {trend_analysis['data_points']}
- Trend: {trend_analysis['trend'].upper()}
- First value: {trend_analysis['first_value']:.6e}
- Last value: {trend_analysis['last_value']:.6e}
- Total change: {trend_analysis['change']:.6e}
"""

        # Qualitative health banding.
        if health_score >= 0.9:
            result += "\n## Health Assessment\n🟢 EXCELLENT - Minimal anomalies detected"
        elif health_score >= 0.7:
            result += "\n## Health Assessment\n🟡 GOOD - Some anomalies present"
        elif health_score >= 0.5:
            result += "\n## Health Assessment\n🟠 MODERATE - Notable anomalies found"
        else:
            result += "\n## Health Assessment\n🔴 POOR - Significant issues detected"

        return result

    except Exception as e:
        logger.error(f"Text analysis failed: {str(e)}")
        return f"Text analysis failed: {str(e)}"

@mcp.tool
def analyze_text_file(file_path: str, analysis_type: str = "default") -> str:
    """Analyze a text file for patterns and trends

    Args:
        file_path: Path to the text file
        analysis_type: Type of analysis

    Returns:
        Analysis results summary
    """
    try:
        # Guard clause: bail out early with a readable message.
        if not os.path.exists(file_path):
            return f"Error: File not found: {file_path}"

        # Undecodable bytes are dropped rather than raising.
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as handle:
            content = handle.read()

        logger.info(f"Analyzing file: {file_path}")

        # Prepend a small file-info header to the content analysis.
        header = (
            f"# File Analysis: {os.path.basename(file_path)}\n\n"
            f"**File:** {file_path}\n"
            f"**Size:** {os.path.getsize(file_path):,} bytes\n\n"
        )
        return header + analyze_text_content(content, analysis_type)

    except Exception as e:
        logger.error(f"File analysis failed: {str(e)}")
        return f"File analysis failed: {str(e)}"

@mcp.tool
def extract_numerical_patterns(text: str) -> str:
    """Extract numerical patterns from text

    Args:
        text: Text content to analyze

    Returns:
        Markdown report with counts, summary statistics, and sample values.
    """
    try:
        numbers = extract_numbers_from_text(text)

        if not numbers:
            return "No numerical patterns found in the text."

        # Basic descriptive statistics.
        mean_val = sum(numbers) / len(numbers)
        min_val = min(numbers)
        max_val = max(numbers)

        # Count tokens written in scientific notation.  (Removed the
        # pointless list comprehension around re.findall — len() of the
        # match list is enough.)
        sci_notation_count = len(re.findall(r'\d+\.?\d*[eE][+-]?\d+', text))

        result = f"""# Numerical Pattern Analysis

## Numbers Detected
- Total numbers: {len(numbers):,}
- Scientific notation: {sci_notation_count:,}

## Statistical Summary
- Mean: {mean_val:.6e}
- Minimum: {min_val:.6e}
- Maximum: {max_val:.6e}
- Range: {max_val - min_val:.6e}

## Sample Values
"""

        # Show at most the first 10 values, then a "more" marker.
        sample_size = min(10, len(numbers))
        for i, num in enumerate(numbers[:sample_size]):
            result += f"- {i+1}: {num:.6e}\n"

        if len(numbers) > sample_size:
            result += f"... and {len(numbers) - sample_size} more values"

        return result

    except Exception as e:
        logger.error(f"Numerical pattern extraction failed: {str(e)}")
        return f"Numerical pattern extraction failed: {str(e)}"

@mcp.tool
def get_analysis_config() -> str:
    """Describe the analysis_type presets accepted by the other tools.

    Returns:
        Static markdown documentation of the available configurations.
    """
    config_doc = """# Analysis Configuration Options

## Available Analysis Types

### default
- General-purpose text analysis
- Balanced numerical and text processing
- Standard anomaly thresholds

### computational_log  
- Optimized for solver logs and computational output
- Enhanced convergence detection
- Scientific notation handling
- Lower thresholds for large numbers

### academic_paper
- Focus on readability and text quality
- Vocabulary analysis
- Minimal numerical processing

### social_text
- Short text optimization
- Informal language patterns
- Higher numerical thresholds

## Usage
Specify analysis_type parameter in tool calls:
- analyze_text_content(text, analysis_type="computational_log")
- analyze_text_file(file_path, analysis_type="academic_paper")
"""
    return config_doc

def main():
    """Parse CLI arguments and launch the MCP server on the chosen transport."""
    parser = argparse.ArgumentParser(description="Structural Text Analyzer MCP Server")
    parser.add_argument("--transport", choices=["stdio"], default="stdio",
                       help="Transport method for MCP communication")
    args = parser.parse_args()

    logger.info("Starting Structural Text Analyzer MCP Server")

    # Guard clause; unreachable in practice since argparse restricts
    # choices to "stdio", but kept for parity with the original flow.
    if args.transport != "stdio":
        logger.error(f"Unsupported transport method: {args.transport}")
        sys.exit(1)

    # STDIO mode for Claude Code integration.
    mcp.run()

# Script entry point: start the MCP server when executed directly.
if __name__ == "__main__":
    main()
