#!/usr/bin/env python3
"""
Enhanced Demo for Structural Text Analyzer
Shows both direct usage and MCP server capabilities
"""

import os
import sys
import tempfile
import json
from pathlib import Path

# Add the parent directory to Python path for imports
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent_dir)

from structural_analyzer.analyzer import StructuralTextAnalyzer
from structural_analyzer.config import ConfigManager

def create_enhanced_sample_files() -> Path:
    """Create enhanced sample files for testing.

    Builds three representative documents — a GPU solver convergence log,
    a data-quality report seeded with NaN/inf anomalies, and an academic
    paper excerpt — inside a fresh temporary directory.

    Returns:
        Path to the temporary directory holding the sample files.  The
        caller owns the directory and is responsible for removing it.
    """
    samples = {
        'convergence_log.txt': """
# GPU Linear Solver Convergence Log
# Date: 2025-07-24 HIP/ROCm Version 5.4
# Problem: Sparse Linear System Ax=b, Matrix size: 1M x 1M

Iteration 0: residual_norm = 1.234567e+02, relative_error = 1.000000e+00
Iteration 1: residual_norm = 6.172835e+01, relative_error = 5.000000e-01
Iteration 2: residual_norm = 3.086418e+01, relative_error = 2.500000e-01
Iteration 3: residual_norm = 1.543209e+01, relative_error = 1.250000e-01
Iteration 4: residual_norm = 7.716045e+00, relative_error = 6.250000e-02
Iteration 5: residual_norm = 3.858023e+00, relative_error = 3.125000e-02
Iteration 6: residual_norm = 1.929011e+00, relative_error = 1.562500e-02
Iteration 7: residual_norm = 9.645057e-01, relative_error = 7.812500e-03
Iteration 8: residual_norm = 4.822528e-01, relative_error = 3.906250e-03
Iteration 9: residual_norm = 2.411264e-01, relative_error = 1.953125e-03
Iteration 10: residual_norm = 1.205632e-01, relative_error = 9.765625e-04

Performance metrics:
- Peak memory usage: 4.56 GB
- GFLOPS achieved: 234.5
- Convergence rate: LINEAR_FAST (ratio ≈ 0.5)
- Total iterations: 15
- Wall time: 23.45 seconds

Status: CONVERGED ✓
Final residual: 1.23456e-06
Final error: 5.67890e-08
        """,

        'anomaly_data.txt': """
Data Quality Assessment Report
=============================

Batch processing results:
Batch 1: 12345 records processed, quality_score = 98.5
Batch 2: 23456 records processed, quality_score = 97.2  
Batch 3: NaN records processed, quality_score = NaN
Batch 4: 34567 records processed, quality_score = inf
Batch 5: 45678 records processed, quality_score = -inf

Temperature readings (°C): 23.4, 25.1, NaN, 22.8, inf, -999999
Pressure values (Pa): 101325, 101234, 101456, NaN, 1.23e+20
Error rates (%): 0.05, 0.12, NaN, 2.34, inf, 15.67

WARNING: Multiple NaN values detected in dataset
ERROR: Infinite values found in critical measurements  
CRITICAL: Division by zero in calculation module
INFO: Large outlier values detected (>1e15)

Data integrity status: COMPROMISED
Manual inspection required: YES
Automated recovery: FAILED
        """,

        'complex_paper.txt': """
Abstract: This comprehensive investigation explores the intricate relationships 
between iterative numerical algorithms and their convergence characteristics 
within the context of large-scale computational fluid dynamics simulations. 
We present novel theoretical frameworks that demonstrate significant improvements 
in algorithmic efficiency while maintaining numerical stability.

Introduction: The computational modeling of complex physical phenomena requires 
sophisticated numerical techniques that can handle multi-scale problems with 
varying degrees of nonlinearity. Traditional approaches often suffer from 
slow convergence rates, particularly when dealing with ill-conditioned systems 
arising from high-aspect-ratio meshes commonly encountered in boundary layer 
computations.

Our research introduces a paradigm-shifting methodology that combines adaptive 
preconditioning strategies with machine learning-enhanced parameter selection. 
The proposed algorithm demonstrates superlinear convergence properties with 
convergence rates consistently below 0.1, representing a substantial improvement 
over conventional methods.

Methodology: We employed a hybrid approach combining Conjugate Gradient methods 
with novel preconditioner adaptations. The algorithm dynamically adjusts 
parameters based on spectral characteristics of the coefficient matrix, 
resulting in optimal convergence behavior across diverse problem classes.

Results: Extensive numerical experiments across 15 benchmark problems show 
performance improvements ranging from 25% to 85% compared to state-of-the-art 
solvers. The algorithm maintains stability even for challenging test cases 
with condition numbers exceeding 1e12.

Conclusion: These findings establish new benchmarks for iterative solver 
performance and open avenues for next-generation computational frameworks 
in scientific computing applications.
        """
    }

    # mkdtemp gives every run its own directory, so repeated demos never collide.
    temp_dir = Path(tempfile.mkdtemp(prefix='enhanced_analyzer_demo_'))

    for filename, content in samples.items():
        # strip() drops the cosmetic leading/trailing blank lines that the
        # triple-quoted literals carry for readability.
        (temp_dir / filename).write_text(content.strip(), encoding='utf-8')

    print(f"Enhanced sample files created in: {temp_dir}")
    return temp_dir

def demo_enhanced_analysis():
    """Demonstrate enhanced analysis capabilities.

    Runs five mini-demos (computational-log analysis, text-description
    generation, anomaly detection, academic-paper analysis, and a
    configuration comparison) over freshly generated sample files.

    The temporary sample directory is removed in a ``finally`` block, so
    it no longer leaks when one of the analysis steps raises.
    """
    import shutil

    print("=" * 80)
    print("ENHANCED STRUCTURAL TEXT ANALYZER DEMONSTRATION")
    print("=" * 80)
    print()

    # Create sample files
    temp_dir = create_enhanced_sample_files()

    try:
        # Demo 1: Computational log analysis
        print("📊 DEMO 1: Computational Log Analysis")
        print("-" * 50)

        log_file = temp_dir / 'convergence_log.txt'
        analyzer = StructuralTextAnalyzer.from_text_type('computational_log', 'demo_enhanced_output')

        print(f"Analyzing: {log_file.name}")
        # Full results are persisted by the analyzer; we only show the summary.
        analyzer.analyze_file(str(log_file), "convergence_demo")

        summary = analyzer.get_summary()
        print(f"✅ Analysis completed in {summary['file_info']['processing_time']:.3f} seconds")
        print(f"📈 Found {summary['trend_metrics']['sequences_analyzed']} numerical sequences")
        print(f"🔍 Health Score: {summary['numerical_metrics']['health_score']:.3f}/1.000")
        print()

        # Demo 2: Text descriptions generation
        print("📝 DEMO 2: Text-Based Analysis Descriptions")
        print("-" * 50)

        if analyzer.config.generate_text_descriptions:
            desc_files = analyzer.create_visualizations("convergence_demo")
            print(f"Generated {len(desc_files)} text description files:")
            for desc_file in desc_files:
                print(f"  📄 {os.path.basename(desc_file)}")
        print()

        # Demo 3: Anomaly detection
        print("🚨 DEMO 3: Advanced Anomaly Detection")
        print("-" * 50)

        anomaly_file = temp_dir / 'anomaly_data.txt'
        analyzer2 = StructuralTextAnalyzer.from_text_type('default', 'demo_enhanced_output')

        print(f"Analyzing: {anomaly_file.name}")
        results2 = analyzer2.analyze_file(str(anomaly_file), "anomaly_demo")

        # Show anomaly details pulled from the nested results structure.
        anomalies = results2.get('numerical_analysis', {}).get('anomalies', {})
        if anomalies:
            anomaly_summary = anomalies.get('summary', {})
            print(f"🔍 Total anomalies detected: {anomaly_summary.get('total_anomalies', 0)}")
            print(f"📊 Anomaly rate: {anomaly_summary.get('anomaly_rate', 0)*100:.2f}%")

            anomaly_types = anomaly_summary.get('anomaly_types', {})
            for anomaly_type, count in anomaly_types.items():
                if count > 0:
                    print(f"   • {anomaly_type.replace('_', ' ').title()}: {count}")
        print()

        # Demo 4: Academic paper analysis
        print("📚 DEMO 4: Academic Paper Analysis")
        print("-" * 50)

        paper_file = temp_dir / 'complex_paper.txt'
        analyzer3 = StructuralTextAnalyzer.from_text_type('academic_paper', 'demo_enhanced_output')

        print(f"Analyzing: {paper_file.name}")
        results3 = analyzer3.analyze_file(str(paper_file), "paper_demo")

        # Show text quality metrics
        summary3 = analyzer3.get_summary()
        complexity = results3.get('text_analysis', {}).get('complexity_metrics', {}).get('complexity_score', 0)
        print(f"📖 Vocabulary diversity: {summary3['text_metrics']['lexical_diversity']:.4f}")
        print(f"🎯 Complexity score: {complexity:.4f}")
        print(f"📏 Average sentence length: {summary3['text_metrics']['words'] / max(1, summary3['text_metrics']['sentences']):.1f} words")
        print()

        # Demo 5: Configuration comparison
        print("⚙️  DEMO 5: Configuration Comparison")
        print("-" * 50)

        configs = ['default', 'computational_log', 'academic_paper', 'social_text']
        print("Available analysis configurations:")
        for config_type in configs:
            config = ConfigManager.get_config_for_type(config_type)
            print(f"  📋 {config_type.upper()}:")
            print(f"     • Large number threshold: {config.large_number_threshold:.0e}")
            print(f"     • Min sequence length: {config.min_sequence_length}")
            print(f"     • Convergence tolerance: {config.convergence_tolerance:.0e}")
        print()
    finally:
        # Always clean up the sample directory, even when a demo fails.
        shutil.rmtree(temp_dir, ignore_errors=True)

    print("🎉 Enhanced demonstration completed!")
    print("📁 Check 'demo_enhanced_output' directory for detailed results")

def demo_mcp_server_usage():
    """Demonstrate MCP server usage (simulated).

    Nothing is executed against a live server: this prints the tool
    catalogue, the launch command, a worked example request (rendered as
    JSON), and a canned example response.
    """
    print("=" * 80)
    print("MCP SERVER DEMONSTRATION")
    print("=" * 80)
    print()

    print("🔧 MCP Server Setup:")
    print("The Structural Text Analyzer can run as an MCP (Model Context Protocol) server,")
    print("allowing AI agents and applications to use it as a tool.")
    print()

    print("📡 Available MCP Tools:")
    tools = [
        ("analyze_text_content", "Analyze text content directly"),
        ("analyze_text_file", "Analyze a text file"),
        ("extract_numerical_patterns", "Extract numerical patterns"),
        ("analyze_convergence_trends", "Analyze trend patterns"),
        ("assess_text_quality", "Assess text quality and readability"),
        ("get_analysis_config", "Get configuration information")
    ]

    for tool_name, description in tools:
        print(f"  🛠️  {tool_name}: {description}")
    print()

    print("🚀 Starting MCP Server:")
    print("python -m structural_analyzer.mcp_server --mcp-server")
    print()

    print("📞 Example MCP Tool Calls:")
    print("Tool: analyze_text_content")
    example_call = {
        # A real newline here lets json.dumps emit the standard "\n" escape;
        # the previous "\\n" literal double-escaped it, printing "\\n" in the
        # example JSON.
        "text": "Iteration 1: residual = 1.23e-05\nIteration 2: residual = 6.78e-06",
        "analysis_type": "computational_log",
        "output_format": "summary"
    }
    print(json.dumps(example_call, indent=2))
    print()

    print("🔄 Example Response:")
    example_response = """# Text Analysis Summary

## Text Metrics
- Characters: 58
- Words: 8
- Numbers found: 4
- Health score: 1.000/1.000

## Trend Analysis
- Sequences analyzed: 1
- Convergent sequences: 1
- Pattern: CONVERGENT ✅
"""
    print(example_response)

def demo_integration_example():
    """Show integration example with external systems.

    Feeds a short solver status report through the computational-log
    analyzer and demonstrates how the summary metrics could drive an
    automated alerting pipeline.
    """
    print("=" * 80)
    print("INTEGRATION EXAMPLE")
    print("=" * 80)
    print()

    sample_text = """
    GPU Solver Status Report
    ========================
    Iteration 1: residual_norm = 1.000000e-01, convergence_rate = 0.85
    Iteration 2: residual_norm = 8.500000e-02, convergence_rate = 0.82
    Iteration 3: residual_norm = 6.970000e-02, convergence_rate = 0.79
    Iteration 4: residual_norm = 5.506300e-02, convergence_rate = 0.76
    
    Performance: 156.7 GFLOPS, Memory: 3.2 GB
    Status: CONVERGING
    """

    print("🔍 Sample Integration: Real-time Log Monitoring")
    print("-" * 50)
    print("Input text:")
    print(sample_text)
    print()

    # Run the computational-log profile over the status report.
    log_analyzer = StructuralTextAnalyzer.from_text_type('computational_log')
    log_analyzer.analyze_text(sample_text, "integration_demo")

    # Pull the key metric groups out of the summary once, up front.
    report = log_analyzer.get_summary()
    numeric = report['numerical_metrics']
    trends = report['trend_metrics']

    print("📊 Automated Analysis Results:")
    print(f"  • Processing time: {report['file_info']['processing_time']:.3f} seconds")
    print(f"  • Numbers detected: {numeric['total_numbers']}")
    print(f"  • Health score: {numeric['health_score']:.3f}")
    print(f"  • Sequences found: {trends['sequences_analyzed']}")
    print()

    health = numeric['health_score']
    convergent = trends['convergent_sequences']

    # Threshold checks that a monitoring system could turn into alerts.
    print("🚨 Automated Alert System:")
    if health < 0.7:
        print("  ⚠️  WARNING: Low health score detected")
    if convergent == 0:
        print("  🔴 ALERT: No convergent sequences found")
    if trends['divergent_sequences'] > 0:
        print("  🚨 CRITICAL: Divergent behavior detected")
    if health >= 0.9 and convergent > 0:
        print("  ✅ OPTIMAL: System performing well")

    print()
    print("💡 Integration Benefits:")
    for benefit in (
        "Real-time analysis of computational logs",
        "Automated anomaly detection and alerting",
        "Performance trend monitoring",
        "Quality assessment without human intervention",
        "Integration with monitoring dashboards",
    ):
        print(f"  • {benefit}")

def main():
    """Run all enhanced demonstrations.

    Executes the analysis, MCP-server, and integration demos in order,
    then prints a feature recap.  A KeyboardInterrupt exits quietly; any
    other exception is reported with its traceback so the suite never
    dies with an unexplained stack dump.
    """
    print("🚀 Structural Text Analyzer - Enhanced Demonstration Suite")
    print("Created by: MiniMax Agent")
    print()

    try:
        # Main analysis demo
        demo_enhanced_analysis()
        print()

        # MCP server demo
        demo_mcp_server_usage()
        print()

        # Integration example
        demo_integration_example()

        print()
        print("=" * 80)
        print("✅ ALL ENHANCED DEMONSTRATIONS COMPLETED SUCCESSFULLY!")
        print("=" * 80)
        print()
        print("📚 What's New in Enhanced Version:")
        print("  ✨ Text-based descriptions instead of visual plots")
        print("  🔧 MCP server support for AI agent integration")
        print("  🚀 Improved numerical analysis and trend detection")
        print("  📊 Enhanced configuration profiles")
        print("  ⚡ Better performance and error handling")
        print("  🎯 More precise anomaly detection")
        print()
        print("🛠️  Usage Options:")
        print("  📖 Direct usage: from structural_analyzer import StructuralTextAnalyzer")
        print("  💻 Command line: python -m structural_analyzer input.txt")
        print("  🔌 MCP server: python -m structural_analyzer.mcp_server --mcp-server")
        print()
        print("🎯 Perfect for:")
        print("  • Scientific computing and HPC monitoring")
        print("  • Data quality assessment and validation")
        print("  • Text analysis research and NLP applications")
        print("  • AI agent integration and automation")

    except KeyboardInterrupt:
        # "\n" (was the double-escaped literal "\\n") starts the notice on a
        # fresh line instead of printing a literal backslash-n.
        print("\n⏹️  Demonstration interrupted by user.")
    except Exception as e:
        print(f"\n❌ Demonstration failed: {e}")
        import traceback
        traceback.print_exc()

# Script entry point: run the full demonstration suite when executed directly.
if __name__ == '__main__':
    main()
