"""
Report generation module.
Creates structured reports in various formats (Markdown, JSON, Text).
"""

import json
import datetime
from typing import Dict, List, Any, Optional
import os

class ReportGenerator:
    """Generates comprehensive reports from analysis results.

    Depending on ``config.output_format`` ('markdown', 'json' or 'txt'),
    results are rendered as Markdown, JSON or plain text and written into
    ``output_dir``.
    """

    def __init__(self, config, output_dir: str = "output"):
        """
        Args:
            config: Configuration object; expected to expose
                ``output_format``, ``include_raw_data``,
                ``large_number_threshold`` and ``small_number_threshold``.
            output_dir: Directory where report files are written.
        """
        self.config = config
        self.output_dir = output_dir

        # Create output directory if it doesn't exist
        os.makedirs(output_dir, exist_ok=True)

    @staticmethod
    def _fmt(value: Any, spec: str = "") -> str:
        """Format *value* with format spec *spec*, degrading gracefully.

        Returns 'N/A' when the value is missing, and falls back to
        ``str(value)`` when the value cannot be formatted with *spec*.
        Without this guard, numeric format specs such as ``:,`` or ``:.3f``
        raise ValueError whenever an analysis key is absent (the previous
        code formatted the string default 'N/A' with numeric specs).
        """
        if value is None or value == 'N/A':
            return 'N/A'
        try:
            return format(value, spec)
        except (ValueError, TypeError):
            return str(value)

    def generate_report(self, results: Dict[str, Any], filename: str = "analysis_report") -> str:
        """Generate a comprehensive analysis report.

        Args:
            results: Analysis results keyed by section ('file_info',
                'text_analysis', 'numerical_analysis', 'trend_analysis').
            filename: Base name (without extension) of the report file.

        Returns:
            Path of the generated report file.
        """
        output_format = self.config.output_format.lower()

        if output_format == 'json':
            return self._generate_json_report(results, filename)
        elif output_format == 'txt':
            return self._generate_text_report(results, filename)
        else:
            # 'markdown' and any unrecognized format default to Markdown.
            return self._generate_markdown_report(results, filename)

    def _generate_markdown_report(self, results: Dict[str, Any], filename: str) -> str:
        """Generate a detailed Markdown report and return its file path."""

        report_lines = []

        # Header
        report_lines.extend([
            "# Automated Structural Information Extraction Report",
            "",
            f"**Generated:** {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "**Analysis Tool:** MiniMax Agent - Structural Text Analyzer",
            "",
            "---",
            ""
        ])

        # Executive Summary
        report_lines.extend(self._generate_executive_summary(results))

        # File Information
        if 'file_info' in results:
            report_lines.extend(self._generate_file_info_section(results['file_info']))

        # Text Structure Analysis
        if 'text_analysis' in results:
            report_lines.extend(self._generate_text_analysis_section(results['text_analysis']))

        # Numerical Analysis
        if 'numerical_analysis' in results:
            report_lines.extend(self._generate_numerical_analysis_section(results['numerical_analysis']))

        # Trend Analysis
        if 'trend_analysis' in results:
            report_lines.extend(self._generate_trend_analysis_section(results['trend_analysis']))

        # Recommendations
        report_lines.extend(self._generate_recommendations(results))

        # Appendices (raw data dump) are optional
        if self.config.include_raw_data:
            report_lines.extend(self._generate_appendices(results))

        # Save report.  BUG FIX: the original hard-coded the output file
        # name instead of using the `filename` argument.
        report_content = "\n".join(report_lines)
        report_path = os.path.join(self.output_dir, f"{filename}.md")

        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report_content)

        return report_path

    def _generate_executive_summary(self, results: Dict[str, Any]) -> List[str]:
        """Generate the executive summary section."""

        lines = [
            "## Executive Summary",
            ""
        ]

        # Extract key metrics; missing sections simply render as 'N/A'.
        text_stats = results.get('text_analysis', {}).get('basic_statistics', {})
        num_summary = results.get('numerical_analysis', {}).get('summary', {})
        num_health = results.get('numerical_analysis', {}).get('health_score', 0)

        # File overview
        lines.append("### File Overview")
        lines.append(f"- **Total Characters:** {self._fmt(text_stats.get('total_characters'), ',')}")
        lines.append(f"- **Total Words:** {self._fmt(text_stats.get('total_words'), ',')}")
        lines.append(f"- **Total Sentences:** {self._fmt(text_stats.get('total_sentences'), ',')}")
        lines.append(f"- **Total Lines:** {self._fmt(text_stats.get('total_lines'), ',')}")
        lines.append("")

        # Numerical content overview
        lines.append("### Numerical Content")
        lines.append(f"- **Numbers Found:** {self._fmt(num_summary.get('total_numbers_found'), ',')}")
        lines.append(f"- **Valid Numbers:** {self._fmt(num_summary.get('valid_numbers'), ',')}")
        lines.append(f"- **Scientific Notation:** {self._fmt(num_summary.get('scientific_notation_count'), ',')}")
        lines.append(f"- **Numerical Health Score:** {num_health:.3f} (0-1 scale, higher is better)")
        lines.append("")

        # Health assessment; thresholds mirror _generate_numerical_analysis_section.
        lines.append("### Health Assessment")
        if num_health >= 0.8:
            lines.append("🟢 **EXCELLENT** - Minimal numerical anomalies detected")
        elif num_health >= 0.6:
            lines.append("🟡 **GOOD** - Some numerical anomalies present but manageable")
        elif num_health >= 0.4:
            lines.append("🟠 **MODERATE** - Notable numerical anomalies requiring attention")
        else:
            lines.append("🔴 **POOR** - Significant numerical anomalies detected")
        lines.append("")

        # Trend summary
        trend_results = results.get('trend_analysis', {})
        if trend_results:
            # Every dict entry except the aggregate 'comparative_analysis'
            # counts as one analyzed sequence.
            trend_count = sum(1 for k, v in trend_results.items()
                            if k != 'comparative_analysis' and isinstance(v, dict))
            lines.append("### Trend Analysis")
            lines.append(f"- **Sequences Analyzed:** {trend_count}")

            # Find the most common trend type across all analyzed sequences.
            trend_types = []
            for k, v in trend_results.items():
                if k != 'comparative_analysis' and isinstance(v, dict):
                    trend_types.append(v.get('trend_type', 'unknown'))

            if trend_types:
                most_common = max(set(trend_types), key=trend_types.count)
                lines.append(f"- **Most Common Trend:** {most_common.title()}")

        lines.extend(["", "---", ""])

        return lines

    def _generate_file_info_section(self, file_info: Dict[str, Any]) -> List[str]:
        """Generate the file information section."""

        lines = [
            "## File Information",
            "",
            "| Attribute | Value |",
            "|-----------|-------|",
            f"| **Filename** | `{file_info.get('filename', 'N/A')}` |",
            f"| **Size** | {self._fmt(file_info.get('size_bytes'), ',')} bytes |",
            f"| **Encoding** | {file_info.get('encoding', 'N/A')} |",
            f"| **Analysis Date** | {file_info.get('analysis_date', 'N/A')} |",
            f"| **Processing Time** | {self._fmt(file_info.get('processing_time'), '.3f')} seconds |",
            "",
            "---",
            ""
        ]

        return lines

    def _generate_text_analysis_section(self, text_analysis: Dict[str, Any]) -> List[str]:
        """Generate the text structure analysis section."""

        lines = [
            "## Text Structure Analysis",
            ""
        ]

        # Basic statistics
        basic_stats = text_analysis.get('basic_statistics', {})
        lines.extend([
            "### Basic Statistics",
            "",
            "| Metric | Value |",
            "|--------|-------|",
            f"| **Total Characters** | {self._fmt(basic_stats.get('total_characters'), ',')} |",
            f"| **Total Words** | {self._fmt(basic_stats.get('total_words'), ',')} |",
            f"| **Total Sentences** | {self._fmt(basic_stats.get('total_sentences'), ',')} |",
            f"| **Total Lines** | {self._fmt(basic_stats.get('total_lines'), ',')} |",
            f"| **Average Sentence Length** | {self._fmt(basic_stats.get('avg_sentence_length'), '.2f')} words |",
            f"| **Average Word Length** | {self._fmt(basic_stats.get('avg_word_length'), '.2f')} characters |",
            f"| **Vocabulary Size** | {self._fmt(basic_stats.get('vocabulary_size'), ',')} unique words |",
            f"| **Lexical Diversity** | {self._fmt(basic_stats.get('lexical_diversity'), '.4f')} |",
            ""
        ])

        # Sentence length distribution
        sentence_analysis = text_analysis.get('sentence_analysis', {})
        if sentence_analysis:
            lines.extend([
                "### Sentence Length Distribution",
                "",
                "| Statistic | Value |",
                "|-----------|-------|",
                f"| **Mean Length** | {self._fmt(sentence_analysis.get('mean'), '.2f')} words |",
                f"| **Median Length** | {self._fmt(sentence_analysis.get('median'), '.2f')} words |",
                f"| **Standard Deviation** | {self._fmt(sentence_analysis.get('std'), '.2f')} |",
                f"| **Minimum Length** | {sentence_analysis.get('min', 'N/A')} words |",
                f"| **Maximum Length** | {sentence_analysis.get('max', 'N/A')} words |",
                f"| **Short Sentences (≤10 words)** | {self._fmt(sentence_analysis.get('short_sentences_pct'), '.1f')}% |",
                f"| **Medium Sentences (11-20 words)** | {self._fmt(sentence_analysis.get('medium_sentences_pct'), '.1f')}% |",
                f"| **Long Sentences (>20 words)** | {self._fmt(sentence_analysis.get('long_sentences_pct'), '.1f')}% |",
                ""
            ])

        # Complexity metrics
        complexity = text_analysis.get('complexity_metrics', {})
        if complexity:
            # Interpretation thresholds: >0.7 high, >0.4 medium, else low.
            complexity_score = complexity.get('complexity_score', 0)
            complexity_label = 'High' if complexity_score > 0.7 else 'Medium' if complexity_score > 0.4 else 'Low'
            lines.extend([
                "### Complexity Analysis",
                "",
                "| Metric | Value | Interpretation |",
                "|--------|-------|----------------|",
                f"| **Complexity Score** | {self._fmt(complexity.get('complexity_score'), '.4f')} | {complexity_label} complexity |"
            ])

            readability = complexity.get('readability_metrics', {})
            if readability:
                # Flesch interpretation: >70 easy, >50 moderate, else difficult.
                flesch = readability.get('flesch_reading_ease', 0)
                flesch_label = 'Easy' if flesch > 70 else 'Moderate' if flesch > 50 else 'Difficult'
                lines.extend([
                    f"| **Flesch Reading Ease** | {self._fmt(readability.get('flesch_reading_ease'), '.1f')} | {flesch_label} to read |",
                    f"| **Avg Sentence Length** | {self._fmt(readability.get('avg_sentence_length'), '.1f')} words | - |",
                    f"| **Avg Syllables per Word** | {self._fmt(readability.get('avg_syllables_per_word'), '.2f')} | - |"
                ])

            lines.append("")

        # Top words (only the ten most frequent are shown)
        word_freq = text_analysis.get('word_frequency', {})
        if word_freq:
            lines.extend([
                "### Most Frequent Words",
                "",
                "| Rank | Word | Frequency |",
                "|------|------|-----------|"
            ])

            for i, (word, freq) in enumerate(list(word_freq.items())[:10], 1):
                lines.append(f"| {i} | `{word}` | {freq} |")

            lines.append("")

        lines.extend(["---", ""])

        return lines

    def _generate_numerical_analysis_section(self, numerical_analysis: Dict[str, Any]) -> List[str]:
        """Generate the numerical analysis section."""

        lines = [
            "## Numerical Analysis",
            ""
        ]

        # Summary
        summary = numerical_analysis.get('summary', {})
        lines.extend([
            "### Numerical Content Summary",
            "",
            "| Metric | Count |",
            "|--------|-------|",
            f"| **Total Numbers Found** | {self._fmt(summary.get('total_numbers_found'), ',')} |",
            f"| **Valid Numbers** | {self._fmt(summary.get('valid_numbers'), ',')} |",
            f"| **Invalid Numbers** | {self._fmt(summary.get('invalid_numbers'), ',')} |",
            f"| **Scientific Notation** | {self._fmt(summary.get('scientific_notation_count'), ',')} |",
            ""
        ])

        # Statistical distribution
        distribution = numerical_analysis.get('distribution', {})
        if distribution:
            lines.extend([
                "### Statistical Distribution",
                "",
                "| Statistic | Value |",
                "|-----------|-------|",
                f"| **Count** | {self._fmt(distribution.get('count'), ',')} |",
                f"| **Mean** | {self._fmt(distribution.get('mean'), '.6e')} |",
                f"| **Median** | {self._fmt(distribution.get('median'), '.6e')} |",
                f"| **Standard Deviation** | {self._fmt(distribution.get('std'), '.6e')} |",
                f"| **Minimum** | {self._fmt(distribution.get('min'), '.6e')} |",
                f"| **Maximum** | {self._fmt(distribution.get('max'), '.6e')} |",
                f"| **Range** | {self._fmt(distribution.get('range'), '.6e')} |",
                f"| **25th Percentile** | {self._fmt(distribution.get('q25'), '.6e')} |",
                f"| **75th Percentile** | {self._fmt(distribution.get('q75'), '.6e')} |",
                f"| **Positive Numbers** | {self._fmt(distribution.get('positives_count'), ',')} |",
                f"| **Negative Numbers** | {self._fmt(distribution.get('negatives_count'), ',')} |",
                f"| **Zero Values** | {self._fmt(distribution.get('zeros_count'), ',')} |",
                ""
            ])

        # Anomaly detection
        anomalies = numerical_analysis.get('anomalies', {})
        if anomalies:
            anomaly_summary = anomalies.get('summary', {})
            anomaly_types = anomaly_summary.get('anomaly_types', {})

            lines.extend([
                "### Anomaly Detection",
                "",
                f"**Total Anomalies Found:** {self._fmt(anomaly_summary.get('total_anomalies'), ',')}",
                f"**Anomaly Rate:** {anomaly_summary.get('anomaly_rate', 0)*100:.2f}%",
                "",
                "| Anomaly Type | Count | Description |",
                "|--------------|-------|-------------|",
                f"| **NaN Values** | {self._fmt(anomaly_types.get('nan'), ',')} | Not-a-Number values |",
                f"| **Infinity Values** | {self._fmt(anomaly_types.get('inf'), ',')} | Infinite values |",
                f"| **Zero Division** | {self._fmt(anomaly_types.get('zero_division'), ',')} | Division by zero occurrences |",
                f"| **Large Numbers** | {self._fmt(anomaly_types.get('large_numbers'), ',')} | Values > {self.config.large_number_threshold:.0e} |",
                f"| **Small Numbers** | {self._fmt(anomaly_types.get('small_numbers'), ',')} | Values < {self.config.small_number_threshold:.0e} |",
                ""
            ])

        # Health score assessment; thresholds mirror _generate_executive_summary.
        health_score = numerical_analysis.get('health_score', 0)
        lines.extend([
            "### Numerical Health Assessment",
            "",
            f"**Health Score:** {health_score:.3f}/1.000",
            ""
        ])

        if health_score >= 0.8:
            lines.extend([
                "🟢 **EXCELLENT** - The numerical data appears to be very clean with minimal anomalies.",
                "- Low anomaly rate indicates reliable computational results",
                "- Numerical stability appears good"
            ])
        elif health_score >= 0.6:
            lines.extend([
                "🟡 **GOOD** - The numerical data has some anomalies but is generally reliable.",
                "- Monitor for specific anomaly types that might affect results",
                "- Consider investigating sources of numerical instability"
            ])
        elif health_score >= 0.4:
            lines.extend([
                "🟠 **MODERATE** - Notable numerical anomalies present.",
                "- Significant anomalies detected that warrant investigation",
                "- Check computational algorithms and input data quality",
                "- Consider improving numerical stability"
            ])
        else:
            lines.extend([
                "🔴 **POOR** - Significant numerical problems detected.",
                "- High anomaly rate suggests serious computational issues",
                "- Immediate investigation recommended",
                "- Check for algorithmic bugs, input data corruption, or numerical instability"
            ])

        lines.extend(["", "---", ""])

        return lines

    def _generate_trend_analysis_section(self, trend_analysis: Dict[str, Any]) -> List[str]:
        """Generate the trend analysis section.

        Every dict entry in ``trend_analysis`` except the aggregate
        'comparative_analysis' key is treated as one analyzed sequence.
        """

        lines = [
            "## Trend Analysis",
            ""
        ]

        # Count sequences (skip the aggregate 'comparative_analysis' entry)
        sequence_count = sum(1 for k, v in trend_analysis.items()
                           if k != 'comparative_analysis' and isinstance(v, dict))

        lines.extend([
            f"**Sequences Analyzed:** {sequence_count}",
            ""
        ])

        # Individual sequence analysis
        for seq_name, seq_analysis in trend_analysis.items():
            if seq_name == 'comparative_analysis' or not isinstance(seq_analysis, dict):
                continue

            lines.extend([
                f"### Sequence: {seq_name}",
                ""
            ])

            # Sequences flagged by the analyzer as too short get a warning only.
            if seq_analysis.get('trend_type') == 'insufficient_data':
                lines.extend([
                    f"⚠️ **Insufficient Data** - {seq_analysis.get('message', 'Not enough data points for analysis')}",
                    ""
                ])
                continue

            # Basic info
            lines.extend([
                "| Attribute | Value |",
                "|-----------|-------|",
                f"| **Data Points** | {seq_analysis.get('data_points', 'N/A')} |",
                f"| **Trend Type** | {seq_analysis.get('trend_type', 'N/A').title()} |"
            ])

            # Trend metrics
            trend_metrics = seq_analysis.get('trend_metrics', {})
            if trend_metrics:
                lines.extend([
                    f"| **Mean Change** | {self._fmt(trend_metrics.get('mean_change'), '.6e')} |",
                    f"| **Acceleration** | {self._fmt(trend_metrics.get('acceleration'), '.6e')} |",
                    f"| **Stability Score** | {self._fmt(trend_metrics.get('stability_score'), '.4f')} |",
                    f"| **Confidence Score** | {self._fmt(trend_metrics.get('confidence_score'), '.4f')} |"
                ])

            # Data range
            data_range = seq_analysis.get('data_range', {})
            if data_range:
                lines.extend([
                    f"| **First Value** | {self._fmt(data_range.get('first'), '.6e')} |",
                    f"| **Last Value** | {self._fmt(data_range.get('last'), '.6e')} |",
                    f"| **Total Change** | {self._fmt(data_range.get('change'), '.6e')} |",
                    f"| **Min Value** | {self._fmt(data_range.get('min'), '.6e')} |",
                    f"| **Max Value** | {self._fmt(data_range.get('max'), '.6e')} |"
                ])

            lines.append("")

            # Convergence analysis
            convergence = seq_analysis.get('convergence_analysis', {})
            if convergence:
                conv_rate = convergence.get('convergence_rate', 'N/A')
                conv_ratio = convergence.get('convergence_ratio', 'N/A')

                # Format the convergence ratio only when it is numeric.
                if isinstance(conv_ratio, (int, float)):
                    conv_ratio_str = f"{conv_ratio:.6f}"
                else:
                    conv_ratio_str = str(conv_ratio)

                lines.extend([
                    "**Convergence Analysis:**",
                    f"- **Rate Classification:** {conv_rate.title() if isinstance(conv_rate, str) else conv_rate}",
                    f"- **Convergence Ratio:** {conv_ratio_str}",
                    ""
                ])

                # Human-readable interpretation of the rate classification.
                if isinstance(conv_rate, str):
                    if 'superlinear' in conv_rate:
                        lines.append("  🟢 **Excellent convergence** - Very fast convergence rate")
                    elif 'linear_fast' in conv_rate:
                        lines.append("  🟡 **Good convergence** - Fast linear convergence")
                    elif 'linear_slow' in conv_rate:
                        lines.append("  🟠 **Moderate convergence** - Slow linear convergence")
                    elif 'sublinear' in conv_rate:
                        lines.append("  🟠 **Slow convergence** - Sublinear convergence rate")
                    elif 'diverging' in conv_rate:
                        lines.append("  🔴 **Diverging** - Sequence is not converging")

                lines.append("")

            # Stagnation periods (show at most the first five)
            stagnation = seq_analysis.get('stagnation_periods', [])
            if stagnation:
                lines.extend([
                    f"**Stagnation Periods Detected:** {len(stagnation)}",
                    ""
                ])

                for i, period in enumerate(stagnation[:5]):
                    lines.append(f"- Period {i+1}: Points {period['start']}-{period['end']} (length: {period['length']})")

                if len(stagnation) > 5:
                    lines.append(f"- ... and {len(stagnation) - 5} more periods")

                lines.append("")

            # Trend changes (show at most the first three)
            trend_changes = seq_analysis.get('trend_changes', [])
            if trend_changes:
                lines.extend([
                    f"**Trend Changes Detected:** {len(trend_changes)}",
                    ""
                ])

                for i, change in enumerate(trend_changes[:3]):
                    lines.append(f"- Change {i+1}: Position {change['position']}, Type: {change['type']}, Magnitude: {change['magnitude']:.6e}")

                if len(trend_changes) > 3:
                    lines.append(f"- ... and {len(trend_changes) - 3} more changes")

                lines.append("")

            lines.append("")

        # Comparative analysis across all sequences
        if 'comparative_analysis' in trend_analysis:
            comp_analysis = trend_analysis['comparative_analysis']

            lines.extend([
                "### Comparative Analysis",
                "",
                "| Metric | Value |",
                "|--------|-------|",
                f"| **Sequences Count** | {comp_analysis.get('sequence_count', 'N/A')} |"
            ])

            length_stats = comp_analysis.get('length_statistics', {})
            if length_stats:
                lines.extend([
                    f"| **Min Sequence Length** | {length_stats.get('min_length', 'N/A')} |",
                    f"| **Max Sequence Length** | {length_stats.get('max_length', 'N/A')} |",
                    f"| **Mean Sequence Length** | {self._fmt(length_stats.get('mean_length'), '.1f')} |"
                ])

            conv_comparison = comp_analysis.get('convergence_comparison', {})
            if conv_comparison:
                lines.extend([
                    f"| **Best Convergence Ratio** | {self._fmt(conv_comparison.get('best_convergence'), '.6f')} |",
                    f"| **Worst Convergence Ratio** | {self._fmt(conv_comparison.get('worst_convergence'), '.6f')} |",
                    f"| **Mean Convergence Ratio** | {self._fmt(conv_comparison.get('mean_convergence_ratio'), '.6f')} |"
                ])

            lines.append("")

        lines.extend(["---", ""])

        return lines

    def _generate_recommendations(self, results: Dict[str, Any]) -> List[str]:
        """Generate actionable recommendations derived from the results."""

        lines = [
            "## Recommendations",
            ""
        ]

        recommendations = []

        # Numerical health recommendations
        num_analysis = results.get('numerical_analysis', {})
        health_score = num_analysis.get('health_score', 1.0)
        anomalies = num_analysis.get('anomalies', {}).get('summary', {}).get('anomaly_types', {})

        if health_score < 0.6:
            recommendations.append("🔧 **Investigate Numerical Anomalies** - Low health score indicates significant numerical issues that need attention.")

        if anomalies.get('nan', 0) > 0:
            recommendations.append("🔍 **Check for NaN Sources** - NaN values detected. Review input data and computational algorithms for potential issues.")

        if anomalies.get('inf', 0) > 0:
            recommendations.append("🔍 **Check for Infinity Sources** - Infinite values detected. Review for potential overflow conditions or division issues.")

        if anomalies.get('zero_division', 0) > 0:
            recommendations.append("⚠️ **Fix Zero Division Issues** - Zero division occurrences detected. Add proper input validation and error handling.")

        # Text analysis recommendations
        text_analysis = results.get('text_analysis', {})
        complexity = text_analysis.get('complexity_metrics', {}).get('complexity_score', 0)
        readability = text_analysis.get('complexity_metrics', {}).get('readability_metrics', {})

        if complexity > 0.8:
            recommendations.append("📖 **High Text Complexity** - Consider simplifying language and sentence structure for better readability.")

        # Flesch reading ease below 30 is conventionally "very difficult".
        flesch_score = readability.get('flesch_reading_ease', 100)
        if flesch_score < 30:
            recommendations.append("📚 **Improve Readability** - Text appears very difficult to read. Consider shortening sentences and using simpler words.")

        # Trend analysis recommendations
        trend_analysis = results.get('trend_analysis', {})
        diverging_sequences = []
        stagnating_sequences = []

        for seq_name, seq_analysis in trend_analysis.items():
            if seq_name == 'comparative_analysis' or not isinstance(seq_analysis, dict):
                continue

            conv_analysis = seq_analysis.get('convergence_analysis', {})
            if conv_analysis.get('convergence_rate') == 'diverging':
                diverging_sequences.append(seq_name)

            if seq_analysis.get('trend_type') == 'stagnation':
                stagnating_sequences.append(seq_name)

        # Name at most three offending sequences in each recommendation.
        if diverging_sequences:
            recommendations.append(f"📈 **Address Diverging Sequences** - The following sequences are diverging: {', '.join(diverging_sequences[:3])}. Check computational stability.")

        if stagnating_sequences:
            recommendations.append(f"⏸️ **Monitor Stagnating Sequences** - The following sequences show stagnation: {', '.join(stagnating_sequences[:3])}. Consider adjusting convergence criteria.")

        # Data quality recommendations
        file_info = results.get('file_info', {})
        file_size = file_info.get('size_bytes', 0)

        if file_size > 10 * 1024 * 1024:  # 10MB
            recommendations.append("💾 **Large File Processing** - Consider implementing chunked processing for better memory management with large files.")

        # Default recommendation if none generated
        if not recommendations:
            recommendations.append("✅ **Overall Good Quality** - No significant issues detected. Regular monitoring recommended for continued quality assurance.")

        # Emit as a numbered list
        for i, rec in enumerate(recommendations, 1):
            lines.append(f"{i}. {rec}")
            lines.append("")

        lines.extend(["---", ""])

        return lines

    def _generate_appendices(self, results: Dict[str, Any]) -> List[str]:
        """Generate appendices containing the raw analysis data as JSON."""

        lines = [
            "## Appendices",
            "",
            "### Appendix A: Raw Analysis Data",
            "",
            "```json",
            # default=str makes non-JSON-serializable values (datetimes etc.)
            # render as strings instead of raising.
            json.dumps(results, indent=2, default=str),
            "```",
            ""
        ]

        return lines

    def _generate_json_report(self, results: Dict[str, Any], filename: str) -> str:
        """Generate a JSON format report and return its file path."""

        # Wrap the results with report metadata.
        enhanced_results = {
            "metadata": {
                "report_generated": datetime.datetime.now().isoformat(),
                "analysis_tool": "MiniMax Agent - Structural Text Analyzer",
                "report_version": "1.0"
            },
            "analysis_results": results
        }

        # BUG FIX: use the `filename` argument instead of a hard-coded name.
        report_path = os.path.join(self.output_dir, f"{filename}.json")

        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(enhanced_results, f, indent=2, default=str)

        return report_path

    def _generate_text_report(self, results: Dict[str, Any], filename: str) -> str:
        """Generate a plain text format report.

        Renders the Markdown report to a temporary file, strips the Markdown
        syntax, and writes the result to ``<filename>.txt``.
        """

        # BUG FIX: derive the temporary Markdown name from `filename`
        # (the original hard-coded it, so concurrent reports collided).
        temp_md_path = self._generate_markdown_report(results, f"{filename}_temp")

        try:
            with open(temp_md_path, 'r', encoding='utf-8') as f:
                content = f.read()
        finally:
            # Always clean up the intermediate file, even if reading fails.
            os.remove(temp_md_path)

        # Simple markdown-to-text conversion (order matters: replacing '# '
        # first also strips the trailing marker of '## ' / '### ' prefixes,
        # matching the original behavior).
        text_content = content
        text_content = text_content.replace('# ', '')
        text_content = text_content.replace('## ', '')
        text_content = text_content.replace('### ', '')
        text_content = text_content.replace('**', '')
        text_content = text_content.replace('`', '')
        text_content = text_content.replace('|', ' ')
        text_content = text_content.replace('---', '-' * 50)

        report_path = os.path.join(self.output_dir, f"{filename}.txt")

        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(text_content)

        return report_path
