"""
Text-based visualization module.
Generates detailed text descriptions instead of visual plots.
"""

import os
import numpy as np
from typing import Dict, List, Any, Optional
from collections import Counter


class TextVisualizer:
    """Creates comprehensive text-based analysis descriptions.

    Instead of rendering graphical plots, each ``_create_*`` helper builds a
    Markdown report (with Unicode bar charts) from an analysis-results dict
    and writes it into ``output_dir``.  All helpers return the written file
    path, or ``None`` if report generation failed.
    """

    def __init__(self, config, output_dir: str = "output"):
        """Store configuration and ensure the output directory exists.

        Args:
            config: Opaque configuration object (stored for API compatibility;
                not read by this class — TODO confirm whether callers rely on it).
            output_dir: Directory where the Markdown reports are written.
        """
        self.config = config
        self.output_dir = output_dir

        # Create output directory if it doesn't exist
        os.makedirs(output_dir, exist_ok=True)

    # ------------------------------------------------------------------ #
    # Internal helpers
    # ------------------------------------------------------------------ #

    @staticmethod
    def _fmt(value: Any, spec: str = "") -> str:
        """Format ``value`` with ``spec`` when it is numeric, else return it as text.

        The analysis dictionaries may be missing keys, in which case callers
        pass the placeholder string ``'N/A'``.  Applying a numeric format spec
        such as ``','`` or ``'.2f'`` to a string raises ``ValueError``, which
        previously aborted the whole report inside the broad ``except``; this
        helper degrades gracefully to the placeholder instead.
        """
        # bool is a subclass of int; exclude it so True/False render as text.
        if isinstance(value, bool) or not isinstance(value, (int, float)):
            return str(value)
        return format(value, spec)

    def _write_report(self, lines: List[str], basename: str) -> str:
        """Join ``lines`` with real newlines and write them under ``output_dir``.

        Returns the full path of the written file.
        """
        filename = os.path.join(self.output_dir, basename)
        with open(filename, 'w', encoding='utf-8') as f:
            # BUG FIX: the original used "\\n".join(...), which inserted the
            # two-character sequence backslash+n instead of actual newlines,
            # producing single-line reports.
            f.write("\n".join(lines))
        return filename

    # ------------------------------------------------------------------ #
    # Public API
    # ------------------------------------------------------------------ #

    def create_text_descriptions(self, results: Dict[str, Any], filename_prefix: str = "analysis") -> List[str]:
        """Create comprehensive text-based analysis descriptions.

        Args:
            results: Combined analysis results; recognized keys are
                ``'text_analysis'``, ``'numerical_analysis'``,
                ``'trend_analysis'`` and ``'file_info'``.
            filename_prefix: Prefix for every generated report filename.

        Returns:
            Paths of all successfully created report files.
        """
        created_files = []

        # Text structure descriptions
        if 'text_analysis' in results:
            text_desc = self._create_text_structure_description(results['text_analysis'], filename_prefix)
            if text_desc:
                created_files.append(text_desc)

        # Numerical analysis descriptions
        if 'numerical_analysis' in results:
            num_desc = self._create_numerical_description(results['numerical_analysis'], filename_prefix)
            if num_desc:
                created_files.append(num_desc)

        # Trend analysis descriptions
        if 'trend_analysis' in results:
            trend_desc = self._create_trend_description(results['trend_analysis'], filename_prefix)
            if trend_desc:
                created_files.append(trend_desc)

        # Summary dashboard description (always attempted)
        dashboard_file = self._create_dashboard_description(results, filename_prefix)
        if dashboard_file:
            created_files.append(dashboard_file)

        return created_files

    def _create_text_structure_description(self, text_results: Dict[str, Any], prefix: str) -> Optional[str]:
        """Create detailed text structure analysis description.

        Returns the written file path, or ``None`` on failure.
        """
        try:
            lines = []
            lines.append("# Text Structure Analysis Description")
            lines.append("=" * 50)
            lines.append("")

            # Basic statistics
            basic_stats = text_results.get('basic_statistics', {})
            lines.append("## Basic Text Statistics")
            lines.append("📊 **Character Analysis:**")
            lines.append(f"   • Total characters: {self._fmt(basic_stats.get('total_characters', 'N/A'), ',')}")
            lines.append(f"   • Total words: {self._fmt(basic_stats.get('total_words', 'N/A'), ',')}")
            lines.append(f"   • Total sentences: {self._fmt(basic_stats.get('total_sentences', 'N/A'), ',')}")
            lines.append(f"   • Average word length: {self._fmt(basic_stats.get('avg_word_length', 'N/A'), '.2f')} characters")
            lines.append("")

            # Vocabulary analysis
            lines.append("📚 **Vocabulary Analysis:**")
            lines.append(f"   • Unique words: {self._fmt(basic_stats.get('vocabulary_size', 'N/A'), ',')}")
            lines.append(f"   • Lexical diversity: {self._fmt(basic_stats.get('lexical_diversity', 'N/A'), '.4f')}")

            diversity_score = basic_stats.get('lexical_diversity', 0)
            if diversity_score > 0.7:
                lines.append("   • Assessment: HIGH diversity - Rich vocabulary usage")
            elif diversity_score > 0.4:
                lines.append("   • Assessment: MODERATE diversity - Balanced vocabulary")
            else:
                lines.append("   • Assessment: LOW diversity - Repetitive language patterns")
            lines.append("")

            # Word frequency analysis (text-based chart)
            word_freq = text_results.get('word_frequency', {})
            if word_freq:
                lines.append("## Top 10 Most Frequent Words")
                lines.append("```")
                lines.append("Rank | Word           | Count | Bar Chart")
                lines.append("-----|----------------|-------|" + "-" * 30)

                # `or 1` guards against an all-zero frequency table (division by zero).
                max_freq = max(word_freq.values(), default=1) or 1
                for i, (word, count) in enumerate(list(word_freq.items())[:10], 1):
                    bar_length = int((count / max_freq) * 25)
                    bar = "█" * bar_length + "░" * (25 - bar_length)
                    lines.append(f" {i:2d}  | {word:<14} | {count:4d}  | {bar}")
                lines.append("```")
                lines.append("")

            # Sentence analysis
            sentence_analysis = text_results.get('sentence_analysis', {})
            if sentence_analysis:
                lines.append("## Sentence Length Analysis")
                lines.append("📏 **Length Statistics:**")
                lines.append(f"   • Mean length: {self._fmt(sentence_analysis.get('mean', 'N/A'), '.1f')} words")
                lines.append(f"   • Median length: {self._fmt(sentence_analysis.get('median', 'N/A'), '.1f')} words")
                lines.append(f"   • Range: {sentence_analysis.get('min', 'N/A')} - {sentence_analysis.get('max', 'N/A')} words")
                lines.append("")

                # Distribution visualization: one block char per 5 percentage points.
                lines.append("📊 **Length Distribution:**")
                short_pct = sentence_analysis.get('short_sentences_pct', 0)
                medium_pct = sentence_analysis.get('medium_sentences_pct', 0)
                long_pct = sentence_analysis.get('long_sentences_pct', 0)

                lines.append(f"   • Short (≤10 words):  {short_pct:5.1f}%  {'█' * int(short_pct/5)}")
                lines.append(f"   • Medium (11-20):     {medium_pct:5.1f}%  {'█' * int(medium_pct/5)}")
                lines.append(f"   • Long (>20 words):   {long_pct:5.1f}%  {'█' * int(long_pct/5)}")
                lines.append("")

            # Complexity metrics
            complexity = text_results.get('complexity_metrics', {})
            if complexity:
                lines.append("## Text Complexity Assessment")
                complexity_score = complexity.get('complexity_score', 0)
                lines.append(f"🎯 **Overall Complexity Score:** {complexity_score:.4f}")

                if complexity_score > 0.7:
                    lines.append("   • Level: HIGH complexity")
                    lines.append("   • Reading difficulty: Advanced")
                    lines.append("   • Recommendation: Consider simplification for broader audience")
                elif complexity_score > 0.4:
                    lines.append("   • Level: MODERATE complexity")
                    lines.append("   • Reading difficulty: Intermediate")
                    lines.append("   • Recommendation: Appropriate for educated readers")
                else:
                    lines.append("   • Level: LOW complexity")
                    lines.append("   • Reading difficulty: Basic")
                    lines.append("   • Recommendation: Accessible to general audience")
                lines.append("")

                # Readability metrics (Flesch bands follow the conventional scale).
                readability = complexity.get('readability_metrics', {})
                if readability:
                    lines.append("📖 **Readability Metrics:**")
                    flesch_score = readability.get('flesch_reading_ease', 0)
                    lines.append(f"   • Flesch Reading Ease: {flesch_score:.1f}")

                    if flesch_score >= 70:
                        lines.append("     → Easy to read (7th grade level)")
                    elif flesch_score >= 50:
                        lines.append("     → Moderately difficult (high school level)")
                    elif flesch_score >= 30:
                        lines.append("     → Difficult (college level)")
                    else:
                        lines.append("     → Very difficult (graduate level)")

                    ari_score = readability.get('automated_readability_index', 0)
                    lines.append(f"   • Automated Readability Index: {ari_score:.1f}")
                    lines.append("")

            # Part of speech distribution
            pos_dist = text_results.get('pos_distribution', {})
            if pos_dist:
                lines.append("## Part-of-Speech Distribution")
                lines.append("```")
                lines.append("Category        | Count | Percentage | Visual")
                lines.append("----------------|-------|------------|" + "-" * 20)

                # `or 1` guards against an all-zero POS table (division by zero).
                total_pos = sum(pos_dist.values()) or 1
                for pos_type, count in sorted(pos_dist.items(), key=lambda x: x[1], reverse=True):
                    percentage = (count / total_pos) * 100
                    bar_length = int(percentage / 5)  # Scale to fit
                    bar = "█" * bar_length + "░" * (20 - bar_length)
                    lines.append(f"{pos_type:<15} | {count:5d} | {percentage:7.1f}%  | {bar}")
                lines.append("```")
                lines.append("")

            return self._write_report(lines, f"{prefix}_text_structure_description.md")

        except Exception as e:
            print(f"Error creating text structure description: {e}")
            return None

    def _create_numerical_description(self, num_results: Dict[str, Any], prefix: str) -> Optional[str]:
        """Create detailed numerical analysis description.

        Returns the written file path, or ``None`` on failure.
        """
        try:
            lines = []
            lines.append("# Numerical Analysis Description")
            lines.append("=" * 50)
            lines.append("")

            # Summary statistics
            summary = num_results.get('summary', {})
            lines.append("## Numerical Content Overview")
            lines.append("🔢 **Discovery Summary:**")
            lines.append(f"   • Total numbers found: {self._fmt(summary.get('total_numbers_found', 'N/A'), ',')}")
            lines.append(f"   • Valid numbers: {self._fmt(summary.get('valid_numbers', 'N/A'), ',')}")
            lines.append(f"   • Invalid/problematic: {self._fmt(summary.get('invalid_numbers', 'N/A'), ',')}")
            lines.append(f"   • Scientific notation: {self._fmt(summary.get('scientific_notation_count', 'N/A'), ',')}")
            lines.append("")

            # Distribution analysis
            distribution = num_results.get('distribution', {})
            if distribution and distribution.get('count', 0) > 0:
                lines.append("## Statistical Distribution")
                lines.append("📊 **Central Tendency:**")
                lines.append(f"   • Mean: {self._fmt(distribution.get('mean', 'N/A'), '.6e')}")
                lines.append(f"   • Median: {self._fmt(distribution.get('median', 'N/A'), '.6e')}")
                lines.append(f"   • Standard deviation: {self._fmt(distribution.get('std', 'N/A'), '.6e')}")
                lines.append("")

                lines.append("📏 **Range Analysis:**")
                lines.append(f"   • Minimum value: {self._fmt(distribution.get('min', 'N/A'), '.6e')}")
                lines.append(f"   • Maximum value: {self._fmt(distribution.get('max', 'N/A'), '.6e')}")
                lines.append(f"   • Total range: {self._fmt(distribution.get('range', 'N/A'), '.6e')}")
                lines.append("")

                # Quartile analysis (Q2 is the median by definition)
                lines.append("📈 **Quartile Breakdown:**")
                lines.append(f"   • Q1 (25th percentile): {self._fmt(distribution.get('q25', 'N/A'), '.6e')}")
                lines.append(f"   • Q2 (50th percentile): {self._fmt(distribution.get('median', 'N/A'), '.6e')}")
                lines.append(f"   • Q3 (75th percentile): {self._fmt(distribution.get('q75', 'N/A'), '.6e')}")
                lines.append("")

                # Value type distribution (positive / negative / zero)
                pos_count = distribution.get('positives_count', 0)
                neg_count = distribution.get('negatives_count', 0)
                zero_count = distribution.get('zeros_count', 0)
                total_nums = pos_count + neg_count + zero_count

                if total_nums > 0:
                    lines.append("🎭 **Value Type Distribution:**")
                    pos_pct = (pos_count / total_nums) * 100
                    neg_pct = (neg_count / total_nums) * 100
                    zero_pct = (zero_count / total_nums) * 100

                    lines.append(f"   • Positive numbers: {pos_count:4d} ({pos_pct:5.1f}%)  {'█' * int(pos_pct/5)}")
                    lines.append(f"   • Negative numbers: {neg_count:4d} ({neg_pct:5.1f}%)  {'█' * int(neg_pct/5)}")
                    lines.append(f"   • Zero values:      {zero_count:4d} ({zero_pct:5.1f}%)  {'█' * int(zero_pct/5)}")
                    lines.append("")

            # Anomaly analysis
            anomalies = num_results.get('anomalies', {})
            if anomalies:
                anomaly_summary = anomalies.get('summary', {})
                lines.append("## Anomaly Detection Results")

                total_anomalies = anomaly_summary.get('total_anomalies', 0)
                anomaly_rate = anomaly_summary.get('anomaly_rate', 0) * 100

                lines.append("🚨 **Anomaly Overview:**")
                lines.append(f"   • Total anomalies detected: {total_anomalies:,}")
                lines.append(f"   • Anomaly rate: {anomaly_rate:.2f}%")
                lines.append("")

                # Detailed anomaly breakdown with per-type impact notes
                anomaly_types = anomaly_summary.get('anomaly_types', {})
                if anomaly_types:
                    lines.append("🔍 **Anomaly Type Breakdown:**")

                    for anomaly_type, count in anomaly_types.items():
                        if count > 0:
                            if anomaly_type == 'nan':
                                lines.append(f"   • NaN (Not-a-Number): {count:,} occurrences")
                                lines.append("     → Impact: HIGH - Indicates data corruption or computational errors")
                            elif anomaly_type == 'inf':
                                lines.append(f"   • Infinity values: {count:,} occurrences")
                                lines.append("     → Impact: HIGH - May indicate overflow or division issues")
                            elif anomaly_type == 'zero_division':
                                lines.append(f"   • Zero division errors: {count:,} occurrences")
                                lines.append("     → Impact: CRITICAL - Computational failures detected")
                            elif anomaly_type == 'large_numbers':
                                lines.append(f"   • Large numbers: {count:,} occurrences")
                                lines.append("     → Impact: LOW - Numbers exceeding threshold (normal in scientific computing)")
                            elif anomaly_type == 'small_numbers':
                                lines.append(f"   • Small numbers: {count:,} occurrences")
                                lines.append("     → Impact: LOW - Very small values detected")
                    lines.append("")

                # Health assessment (only emitted when anomaly data is present,
                # matching the original behavior)
                health_score = num_results.get('health_score', 0)
                lines.append("## Numerical Health Assessment")
                lines.append(f"🏥 **Health Score: {health_score:.3f}/1.000**")

                if health_score >= 0.9:
                    lines.append("   • Status: EXCELLENT ✅")
                    lines.append("   • Assessment: Minimal anomalies, high data quality")
                    lines.append("   • Recommendation: Data appears reliable for analysis")
                elif health_score >= 0.7:
                    lines.append("   • Status: GOOD ✅")
                    lines.append("   • Assessment: Some anomalies present but manageable")
                    lines.append("   • Recommendation: Monitor specific anomaly types")
                elif health_score >= 0.5:
                    lines.append("   • Status: MODERATE ⚠️")
                    lines.append("   • Assessment: Notable anomalies requiring attention")
                    lines.append("   • Recommendation: Investigate data quality issues")
                else:
                    lines.append("   • Status: POOR ❌")
                    lines.append("   • Assessment: Significant numerical problems detected")
                    lines.append("   • Recommendation: Immediate data quality review required")
                lines.append("")

            # Magnitude analysis (if available)
            if distribution and 'magnitude_analysis' in distribution:
                mag_analysis = distribution['magnitude_analysis']
                lines.append("## Magnitude Analysis")
                lines.append("📐 **Scale Characteristics:**")
                lines.append(f"   • Mean log magnitude: {self._fmt(mag_analysis.get('mean_log_magnitude', 'N/A'), '.2f')}")
                lines.append(f"   • Magnitude spread: {self._fmt(mag_analysis.get('std_log_magnitude', 'N/A'), '.2f')}")
                lines.append(f"   • Magnitude range: {self._fmt(mag_analysis.get('magnitude_range', 'N/A'), '.2f')} orders")
                lines.append("")

            return self._write_report(lines, f"{prefix}_numerical_description.md")

        except Exception as e:
            print(f"Error creating numerical description: {e}")
            return None

    def _create_trend_description(self, trend_results: Dict[str, Any], prefix: str) -> Optional[str]:
        """Create detailed trend analysis description.

        ``trend_results`` maps sequence names to per-sequence analysis dicts;
        the special key ``'comparative_analysis'`` is skipped.
        Returns the written file path, or ``None`` on failure.
        """
        try:
            lines = []
            lines.append("# Trend Analysis Description")
            lines.append("=" * 50)
            lines.append("")

            # Count sequences (every dict-valued entry except the comparative one)
            sequence_count = sum(1 for k, v in trend_results.items()
                               if k != 'comparative_analysis' and isinstance(v, dict))

            lines.append("## Trend Analysis Overview")
            lines.append(f"📈 **Sequences Analyzed:** {sequence_count}")
            lines.append("")

            # Tallies for the summary section at the end
            convergent_count = 0
            divergent_count = 0
            stagnant_count = 0
            oscillating_count = 0

            for seq_name, seq_analysis in trend_results.items():
                if seq_name == 'comparative_analysis' or not isinstance(seq_analysis, dict):
                    continue

                # Get sequence metadata for better description
                metadata = seq_analysis.get('sequence_metadata', {})
                description = metadata.get('description', 'Numerical sequence')
                sequence_type = metadata.get('sequence_type', 'general_numeric')
                keywords = metadata.get('keywords', [])

                # Create readable sequence name
                readable_name = seq_name.replace('_', ' ').title()

                # Create type emoji based on sequence type
                type_emoji = {
                    'convergence_metric': '🎯',
                    'error_metric': '⚠️',
                    'algorithm_parameter': '⚙️',
                    'performance_metric': '📈',
                    'iteration_progress': '🔄',
                    'convergence_rate': '📊',
                    'general_numeric': '📋'
                }.get(sequence_type, '📊')

                lines.append(f"### {type_emoji} Sequence: {readable_name}")
                lines.append(f"**Description:** {description}")
                if keywords:
                    lines.append(f"**Keywords:** {', '.join(keywords)}")
                lines.append("")

                # Check for insufficient data
                if seq_analysis.get('trend_type') == 'insufficient_data':
                    lines.append("   ⚠️  **Insufficient Data**")
                    lines.append(f"   • Message: {seq_analysis.get('message', 'Not enough data points')}")
                    lines.append("")
                    continue

                # Basic sequence info
                data_points = seq_analysis.get('data_points', 'N/A')
                trend_type = seq_analysis.get('trend_type', 'unknown')
                lines.append("   📊 **Basic Information:**")
                lines.append(f"   • Data points: {data_points}")
                lines.append(f"   • Trend classification: {trend_type.upper()}")
                lines.append(f"   • Sequence type: {sequence_type.replace('_', ' ').title()}")

                # Count trend types (substring match on the classification name)
                if 'convergence' in trend_type.lower():
                    convergent_count += 1
                    lines.append("   • Pattern: CONVERGENT ✅")
                elif 'divergence' in trend_type.lower():
                    divergent_count += 1
                    lines.append("   • Pattern: DIVERGENT ⚠️")
                elif 'stagnation' in trend_type.lower():
                    stagnant_count += 1
                    lines.append("   • Pattern: STAGNANT 🔄")
                elif 'oscillation' in trend_type.lower():
                    oscillating_count += 1
                    lines.append("   • Pattern: OSCILLATING 〰️")
                lines.append("")

                # Trend metrics
                trend_metrics = seq_analysis.get('trend_metrics', {})
                if trend_metrics:
                    lines.append("   📏 **Trend Metrics:**")
                    mean_change = trend_metrics.get('mean_change', 0)
                    lines.append(f"   • Mean change per step: {mean_change:.6e}")

                    acceleration = trend_metrics.get('acceleration', 0)
                    lines.append(f"   • Acceleration: {acceleration:.6e}")

                    stability = trend_metrics.get('stability_score', 0)
                    lines.append(f"   • Stability score: {stability:.4f}")

                    confidence = trend_metrics.get('confidence_score', 0)
                    lines.append(f"   • Confidence: {confidence:.4f}")
                    lines.append("")

                # Data range analysis
                data_range = seq_analysis.get('data_range', {})
                if data_range:
                    lines.append("   📐 **Value Range:**")
                    lines.append(f"   • Initial value: {self._fmt(data_range.get('first', 'N/A'), '.6e')}")
                    lines.append(f"   • Final value: {self._fmt(data_range.get('last', 'N/A'), '.6e')}")
                    lines.append(f"   • Total change: {self._fmt(data_range.get('change', 'N/A'), '.6e')}")
                    lines.append(f"   • Min/Max: [{self._fmt(data_range.get('min', 'N/A'), '.6e')}, {self._fmt(data_range.get('max', 'N/A'), '.6e')}]")
                    lines.append("")

                # Convergence analysis (if available)
                convergence = seq_analysis.get('convergence_analysis', {})
                if convergence:
                    conv_rate = convergence.get('convergence_rate', 'unknown')
                    conv_ratio = convergence.get('convergence_ratio', 'N/A')

                    lines.append("   🎯 **Convergence Analysis:**")
                    lines.append(f"   • Rate classification: {conv_rate.upper()}")

                    if isinstance(conv_ratio, (int, float)):
                        lines.append(f"   • Convergence ratio: {conv_ratio:.6f}")

                        # Interpretation of the rate classification
                        if conv_rate == 'superlinear':
                            lines.append("   • Interpretation: EXCELLENT - Very fast convergence")
                        elif conv_rate == 'linear_fast':
                            lines.append("   • Interpretation: GOOD - Fast linear convergence")
                        elif conv_rate == 'linear_slow':
                            lines.append("   • Interpretation: MODERATE - Slow linear convergence")
                        elif conv_rate == 'sublinear':
                            lines.append("   • Interpretation: SLOW - Sublinear convergence")
                        elif conv_rate == 'diverging':
                            lines.append("   • Interpretation: PROBLEM - Sequence is diverging")
                    lines.append("")

                # Stagnation periods (if any)
                stagnation = seq_analysis.get('stagnation_periods', [])
                if stagnation:
                    lines.append(f"   ⏸️  **Stagnation Periods:** {len(stagnation)} detected")
                    for i, period in enumerate(stagnation[:3], 1):  # Show first 3
                        lines.append(f"   • Period {i}: Steps {period['start']}-{period['end']} (length: {period['length']})")
                    if len(stagnation) > 3:
                        lines.append(f"   • ... and {len(stagnation) - 3} more periods")
                    lines.append("")

                # Trend changes (if any)
                trend_changes = seq_analysis.get('trend_changes', [])
                if trend_changes:
                    lines.append(f"   🔄 **Trend Changes:** {len(trend_changes)} detected")
                    for i, change in enumerate(trend_changes[:3], 1):  # Show first 3
                        lines.append(f"   • Change {i}: Position {change['position']}, Type: {change['type']}")
                        lines.append(f"     Magnitude: {change['magnitude']:.6e}")
                    if len(trend_changes) > 3:
                        lines.append(f"   • ... and {len(trend_changes) - 3} more changes")
                    lines.append("")

                lines.append("---")
                lines.append("")

            # Comparative analysis summary
            # (max(1, ...) avoids division by zero when no sequences were found)
            lines.append("## 📋 Summary Statistics")
            lines.append("")
            lines.append("**Pattern Distribution:**")
            lines.append(f"• Convergent sequences: {convergent_count} ({convergent_count/max(1,sequence_count)*100:.1f}%)")
            lines.append(f"• Divergent sequences: {divergent_count} ({divergent_count/max(1,sequence_count)*100:.1f}%)")
            lines.append(f"• Stagnant sequences: {stagnant_count} ({stagnant_count/max(1,sequence_count)*100:.1f}%)")
            lines.append(f"• Oscillating sequences: {oscillating_count} ({oscillating_count/max(1,sequence_count)*100:.1f}%)")
            lines.append("")

            # Overall assessment
            lines.append("## 🎯 Overall Assessment")
            lines.append("")

            if convergent_count > divergent_count:
                lines.append("✅ **POSITIVE**: Majority of sequences show convergent behavior")
                lines.append("   → Indicates stable computational processes")
            elif divergent_count > 0:
                lines.append("⚠️  **CAUTION**: Divergent sequences detected")
                lines.append("   → May indicate numerical instability issues")

            if stagnant_count > sequence_count * 0.5:
                lines.append("📊 **OBSERVATION**: High proportion of stagnant sequences")
                lines.append("   → May need to adjust convergence criteria or solution methods")

            lines.append("")

            return self._write_report(lines, f"{prefix}_trend_description.md")

        except Exception as e:
            print(f"Error creating trend description: {e}")
            return None

    def _create_dashboard_description(self, results: Dict[str, Any], prefix: str) -> Optional[str]:
        """Create a comprehensive dashboard description.

        Aggregates headline metrics from all analysis sections into a single
        summary report.  Returns the written file path, or ``None`` on failure.
        """
        try:
            lines = []
            lines.append("# Analysis Dashboard Summary")
            lines.append("=" * 60)
            lines.append("")

            # File information
            file_info = results.get('file_info', {})
            lines.append("## 📄 File Information")
            lines.append(f"• **Filename:** {file_info.get('filename', 'N/A')}")
            lines.append(f"• **Size:** {file_info.get('size_bytes', 0):,} bytes")
            lines.append(f"• **Encoding:** {file_info.get('encoding', 'N/A')}")
            lines.append(f"• **Processing time:** {file_info.get('processing_time', 0):.3f} seconds")
            lines.append("")

            # Quick metrics overview
            text_stats = results.get('text_analysis', {}).get('basic_statistics', {})
            num_summary = results.get('numerical_analysis', {}).get('summary', {})
            num_health = results.get('numerical_analysis', {}).get('health_score', 0)

            lines.append("## 📊 Quick Metrics Overview")
            lines.append("```")
            lines.append("Metric                    | Value           | Status")
            lines.append("--------------------------|-----------------|----------")
            lines.append(f"Characters                | {text_stats.get('total_characters', 0):,} | ✓")
            lines.append(f"Words                     | {text_stats.get('total_words', 0):,} | ✓")
            lines.append(f"Sentences                 | {text_stats.get('total_sentences', 0):,} | ✓")
            lines.append(f"Vocabulary size           | {text_stats.get('vocabulary_size', 0):,} | ✓")
            lines.append(f"Numbers found             | {num_summary.get('total_numbers_found', 0):,} | ✓")
            lines.append(f"Numerical health          | {num_health:.3f} | {'✓' if num_health > 0.7 else '⚠️' if num_health > 0.4 else '❌'}")
            lines.append("```")
            lines.append("")

            # Health assessment gauge (text-based)
            lines.append("## 🏥 Health Assessment Gauge")
            lines.append("")

            gauge_value = int(num_health * 20)  # Scale to 0-20
            gauge_bar = "█" * gauge_value + "░" * (20 - gauge_value)
            lines.append(f"Numerical Health: [{gauge_bar}] {num_health:.3f}")
            lines.append("")

            if num_health >= 0.9:
                lines.append("🟢 **EXCELLENT** - Minimal anomalies, high data quality")
            elif num_health >= 0.7:
                lines.append("🟡 **GOOD** - Some anomalies present but manageable")
            elif num_health >= 0.5:
                lines.append("🟠 **MODERATE** - Notable anomalies requiring attention")
            else:
                lines.append("🔴 **POOR** - Significant numerical problems detected")
            lines.append("")

            # Key findings summary
            lines.append("## 🔍 Key Findings")

            # Text complexity
            complexity = results.get('text_analysis', {}).get('complexity_metrics', {}).get('complexity_score', 0)
            if complexity > 0.7:
                lines.append("• 📚 **Text Complexity:** HIGH - Advanced vocabulary and structure")
            elif complexity > 0.4:
                lines.append("• 📚 **Text Complexity:** MODERATE - Balanced readability")
            else:
                lines.append("• 📚 **Text Complexity:** LOW - Simple and accessible")

            # Numerical anomalies
            anomalies = results.get('numerical_analysis', {}).get('anomalies', {}).get('summary', {})
            total_anomalies = anomalies.get('total_anomalies', 0)
            if total_anomalies > 0:
                lines.append(f"• 🚨 **Anomalies Detected:** {total_anomalies:,} numerical anomalies found")
            else:
                lines.append("• ✅ **Data Quality:** No significant anomalies detected")

            # Trend patterns (substring match against each sequence's trend_type)
            trend_results = results.get('trend_analysis', {})
            convergent_count = sum(1 for k, v in trend_results.items()
                                 if isinstance(v, dict) and 'convergence' in v.get('trend_type', '').lower())
            divergent_count = sum(1 for k, v in trend_results.items()
                                if isinstance(v, dict) and 'divergence' in v.get('trend_type', '').lower())

            if convergent_count > 0:
                lines.append(f"• 📈 **Convergent Trends:** {convergent_count} sequences showing convergence")
            if divergent_count > 0:
                lines.append(f"• ⚠️  **Divergent Trends:** {divergent_count} sequences showing divergence")

            lines.append("")

            # Processing efficiency (only when a positive processing time is known)
            processing_time = file_info.get('processing_time', 0)
            file_size_mb = file_info.get('size_bytes', 0) / (1024 * 1024)
            if processing_time > 0:
                throughput = file_size_mb / processing_time
                lines.append("## ⚡ Processing Performance")
                lines.append(f"• **Processing speed:** {throughput:.2f} MB/second")
                lines.append(f"• **Efficiency:** {'Excellent' if throughput > 10 else 'Good' if throughput > 1 else 'Standard'}")
                lines.append("")

            # Quick action items derived from the metrics above
            lines.append("## 📋 Quick Action Items")

            action_items = []

            # Based on health score
            if num_health < 0.5:
                action_items.append("🔴 **URGENT:** Review data quality - significant anomalies detected")
            elif num_health < 0.7:
                action_items.append("🟠 **MODERATE:** Monitor numerical anomalies")

            # Based on complexity
            if complexity > 0.8:
                action_items.append("📚 **CONSIDER:** Simplify language for broader accessibility")

            # Based on trends
            if divergent_count > 0:
                action_items.append("📈 **INVESTIGATE:** Divergent sequences may indicate instability")

            if not action_items:
                action_items.append("✅ **MAINTENANCE:** Continue regular monitoring")

            for item in action_items:
                lines.append(f"• {item}")

            lines.append("")
            lines.append("---")
            lines.append("*Generated by MiniMax Agent - Structural Text Analyzer*")

            return self._write_report(lines, f"{prefix}_dashboard_summary.md")

        except Exception as e:
            print(f"Error creating dashboard description: {e}")
            return None
