"""
Main structural analyzer class.
Coordinates all analysis modules and provides the main interface.
"""

import os
import time
import datetime
from typing import Dict, List, Any, Optional
from pathlib import Path

# Handle imports for both package and standalone modes.
# Three tiers: (1) relative package imports, (2) flat imports with this
# file's directory on sys.path, (3) no-op dummy stand-ins so that merely
# importing this module never fails outright.
try:
    # Package mode imports
    from .config import AnalysisConfig, ConfigManager
    from .modules import (
        TextStructureAnalyzer,
        NumericalAnalyzer,
        TrendAnalyzer,
        ReportGenerator,
        Visualizer
    )
except ImportError:
    # Standalone mode imports
    import sys
    import os
    # Make this file's own directory importable so sibling modules resolve.
    sys.path.insert(0, os.path.dirname(__file__))
    
    try:
        from config import AnalysisConfig, ConfigManager
        from modules.text_analyzer import TextStructureAnalyzer
        from modules.numerical_analyzer import NumericalAnalyzer
        from modules.trend_analyzer import TrendAnalyzer
        from modules.report_generator import ReportGenerator
        from modules.visualizer import Visualizer
    except ImportError as e:
        # Degraded mode: warn on stderr and fall through to dummies below.
        print(f"Warning: Could not import analysis modules: {e}", file=sys.stderr)
        # Create dummy classes to prevent total failure
        class DummyAnalyzer:
            """No-op stand-in used for every analysis module in degraded mode."""
            def __init__(self, *args, **kwargs): pass
            def analyze(self, *args, **kwargs): 
                # The returned shape mirrors the real analyzers' output just
                # enough for get_summary()/print_summary() to keep working.
                return {
                    'basic_statistics': {
                        'total_characters': 0,
                        'total_words': 0,
                        'total_sentences': 0,
                        'vocabulary_size': 0,
                        'lexical_diversity': 0
                    },
                    'summary': {
                        'total_numbers_found': 0,
                        'valid_numbers': 0
                    },
                    'health_score': 0.5
                }
            
            def extract_sequences(self, *args, **kwargs): 
                return []
            
            def analyze_multiple_sequences(self, *args, **kwargs):
                return {}
            
            def generate_report(self, *args, **kwargs):
                return "Report generation not available in dummy mode"
            
            def create_visualizations(self, *args, **kwargs):
                return []
            
            def visualize(self, *args, **kwargs):
                return "Visualization not available in dummy mode"
        
        # One dummy class serves as all five module classes.
        TextStructureAnalyzer = DummyAnalyzer
        NumericalAnalyzer = DummyAnalyzer  
        TrendAnalyzer = DummyAnalyzer
        ReportGenerator = DummyAnalyzer
        Visualizer = DummyAnalyzer
        
        # Create dummy config classes
        class DummyConfig:
            """Attribute-bag stand-in for AnalysisConfig."""
            def __init__(self, **kwargs):
                for k, v in kwargs.items():
                    setattr(self, k, v)
        
        class DummyConfigManager:
            """Stand-in for ConfigManager that returns safe default settings."""
            @staticmethod
            def get_config_for_type(config_type):
                # config_type is ignored in dummy mode; defaults match
                # create_custom_config() below.
                return DummyConfig(
                    large_number_threshold=1e10,
                    min_sequence_length=3,
                    convergence_tolerance=1e-6,
                    generate_plots=False,
                    output_format='markdown',
                    max_file_size_mb=100,
                    encoding='utf-8'
                )
            
            @staticmethod
            def create_custom_config(**kwargs):
                defaults = {
                    'large_number_threshold': 1e10,
                    'min_sequence_length': 3,
                    'convergence_tolerance': 1e-6,
                    'generate_plots': False,
                    'output_format': 'markdown',
                    'max_file_size_mb': 100,
                    'encoding': 'utf-8'
                }
                defaults.update(kwargs)
                return DummyConfig(**defaults)
        
        AnalysisConfig = DummyConfig
        ConfigManager = DummyConfigManager

class StructuralTextAnalyzer:
    """Main analyzer class that coordinates all analysis modules.

    Wires together the text-structure, numerical, and trend analyzers plus
    the report generator and visualizer, all sharing one configuration.
    The most recent analysis results are kept on ``self.results`` and are
    consumed by the report, visualization, summary, and save methods.
    """

    def __init__(self, config: Optional[AnalysisConfig] = None, output_dir: str = "output"):
        """Initialize the analyzer with configuration.

        Args:
            config: Analysis configuration; a default ``AnalysisConfig`` is
                created when omitted.
            output_dir: Directory where reports, plots, and raw results are
                written.  Created if it does not already exist.
        """
        self.config = config or AnalysisConfig()
        self.output_dir = output_dir

        # Create output directory up front so downstream writers can rely on it.
        os.makedirs(output_dir, exist_ok=True)

        # Initialize analysis modules (all share the same config instance).
        self.text_analyzer = TextStructureAnalyzer(self.config)
        self.numerical_analyzer = NumericalAnalyzer(self.config)
        self.trend_analyzer = TrendAnalyzer(self.config)
        self.report_generator = ReportGenerator(self.config, output_dir)
        self.visualizer = Visualizer(self.config, output_dir)

        # Results of the most recent analyze_file()/analyze_text() call.
        self.results: Dict[str, Any] = {}

    @classmethod
    def from_text_type(cls, text_type: str, output_dir: str = "output") -> "StructuralTextAnalyzer":
        """Create analyzer with pre-configured settings for specific text types."""
        config = ConfigManager.get_config_for_type(text_type)
        return cls(config, output_dir)

    @classmethod
    def from_custom_config(cls, output_dir: str = "output", **config_kwargs) -> "StructuralTextAnalyzer":
        """Create analyzer with custom configuration overrides."""
        config = ConfigManager.create_custom_config(**config_kwargs)
        return cls(config, output_dir)

    def _read_file_content(self, file_path: str) -> str:
        """Read *file_path* as text, falling back through common encodings.

        The configured encoding is tried first.  On failure, fallbacks are
        attempted with cp1252 *before* latin1: latin1 can decode any byte
        sequence, so anything placed after it is unreachable (the previous
        ordering made cp1252 dead code).  The encoding that succeeds is
        recorded on ``self.config.encoding`` so file metadata reflects it.

        Raises:
            ValueError: If no supported encoding can decode the file.
        """
        try:
            with open(file_path, 'r', encoding=self.config.encoding) as f:
                return f.read()
        except UnicodeDecodeError:
            pass

        for encoding in ('utf-8', 'cp1252', 'latin1'):
            if encoding == self.config.encoding:
                # This one already failed above; no point retrying.
                continue
            try:
                with open(file_path, 'r', encoding=encoding) as f:
                    content = f.read()
                self.config.encoding = encoding
                return content
            except UnicodeDecodeError:
                continue
        raise ValueError("Could not decode file with any supported encoding")

    def _run_pipeline(self, text: str) -> Dict[str, Any]:
        """Run the shared four-step analysis pipeline on *text*.

        Returns a dict with the 'text_analysis', 'numerical_analysis',
        'trend_analysis', and 'sequences_found' result sections; shared by
        analyze_file() and analyze_text() so the two stay consistent.
        """
        print("1. Analyzing text structure...")
        text_results = self.text_analyzer.analyze(text)

        print("2. Analyzing numerical content...")
        numerical_results = self.numerical_analyzer.analyze(text)

        print("3. Extracting numerical sequences...")
        sequences = self.numerical_analyzer.extract_sequences(text)

        trend_results: Dict[str, Any] = {}
        if sequences:
            print(f"4. Analyzing trends in {len(sequences)} sequences...")
            trend_results = self.trend_analyzer.analyze_multiple_sequences(sequences)
        else:
            print("4. No numerical sequences found for trend analysis.")

        return {
            'text_analysis': text_results,
            'numerical_analysis': numerical_results,
            'trend_analysis': trend_results,
            'sequences_found': sequences
        }

    def analyze_file(self, file_path: str, analysis_name: Optional[str] = None) -> Dict[str, Any]:
        """Analyze a text file and return comprehensive results.

        Args:
            file_path: Path to the text file to analyze.
            analysis_name: Optional label used for default output filenames;
                a timestamped name is generated when omitted.

        Returns:
            The compiled results dict (also stored on ``self.results``).

        Raises:
            FileNotFoundError: If *file_path* does not exist.
            ValueError: If the file exceeds the configured size limit or
                cannot be decoded with any supported encoding.
        """
        start_time = time.time()

        # Validate existence and size before reading anything.
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        file_size = os.path.getsize(file_path)
        if file_size > self.config.max_file_size_mb * 1024 * 1024:
            raise ValueError(f"File too large: {file_size} bytes > {self.config.max_file_size_mb}MB limit")

        text_content = self._read_file_content(file_path)

        # Run metadata; 'processing_time' is filled in after the pipeline.
        file_info = {
            'filename': os.path.basename(file_path),
            'filepath': file_path,
            'size_bytes': file_size,
            'encoding': self.config.encoding,
            'analysis_date': datetime.datetime.now().isoformat(),
            'analysis_name': analysis_name or f"analysis_{int(time.time())}"
        }

        print(f"Analyzing file: {file_path}")
        print(f"File size: {file_size:,} bytes")
        print(f"Encoding: {self.config.encoding}")
        print("\nStarting analysis...")

        analysis = self._run_pipeline(text_content)

        file_info['processing_time'] = time.time() - start_time

        self.results = {'file_info': file_info, **analysis}

        print(f"\nAnalysis completed in {file_info['processing_time']:.2f} seconds")

        return self.results

    def analyze_text(self, text: str, analysis_name: Optional[str] = None) -> Dict[str, Any]:
        """Analyze text content directly (without a file).

        Args:
            text: The text to analyze.
            analysis_name: Optional label used for default output filenames;
                a timestamped name is generated when omitted.

        Returns:
            The compiled results dict (also stored on ``self.results``).
        """
        start_time = time.time()

        # Metadata mirrors the file-based path so downstream consumers can
        # treat both sources uniformly under the 'file_info' key.
        text_info = {
            'source': 'direct_text',
            'size_bytes': len(text.encode('utf-8')),
            'encoding': 'utf-8',
            'analysis_date': datetime.datetime.now().isoformat(),
            'analysis_name': analysis_name or f"text_analysis_{int(time.time())}"
        }

        print(f"Analyzing text content ({len(text)} characters)...")
        print()  # blank line before the numbered pipeline steps

        analysis = self._run_pipeline(text)

        text_info['processing_time'] = time.time() - start_time

        self.results = {'file_info': text_info, **analysis}

        print(f"\nAnalysis completed in {text_info['processing_time']:.2f} seconds")

        return self.results

    def generate_report(self, output_filename: Optional[str] = None) -> str:
        """Generate a comprehensive analysis report.

        Args:
            output_filename: Base name for the report file; defaults to the
                current analysis name.

        Returns:
            Path to the generated report.

        Raises:
            ValueError: If no analysis has been run yet.
        """
        if not self.results:
            raise ValueError("No analysis results available. Run analyze_file() or analyze_text() first.")

        if output_filename is None:
            # Fall back to the analysis name recorded during analysis.
            output_filename = self.results['file_info'].get('analysis_name', 'analysis_report')

        print(f"\nGenerating {self.config.output_format.upper()} report...")
        report_path = self.report_generator.generate_report(self.results, output_filename)

        print(f"Report saved to: {report_path}")
        return report_path

    def create_visualizations(self, output_prefix: Optional[str] = None) -> List[str]:
        """Create visualizations for the analysis results.

        Args:
            output_prefix: Filename prefix for the plot files; defaults to
                the current analysis name.

        Returns:
            Paths of the created visualization files (empty when plotting
            is disabled or nothing was produced).

        Raises:
            ValueError: If no analysis has been run yet.
        """
        if not self.results:
            raise ValueError("No analysis results available. Run analyze_file() or analyze_text() first.")

        if not self.config.generate_plots:
            print("Visualization generation is disabled in configuration.")
            return []

        if output_prefix is None:
            output_prefix = self.results['file_info'].get('analysis_name', 'analysis')

        print(f"\nGenerating visualizations...")
        created_files = self.visualizer.create_comprehensive_report(self.results, output_prefix)

        if created_files:
            print(f"Created {len(created_files)} visualization files:")
            for file_path in created_files:
                print(f"  - {file_path}")
        else:
            print("No visualizations were created.")

        return created_files

    def get_summary(self) -> Dict[str, Any]:
        """Get a quick summary of analysis results.

        Returns:
            A dict with 'file_info', 'text_metrics', 'numerical_metrics',
            and 'trend_metrics' sections, or ``{"error": ...}`` when no
            analysis has been run yet.
        """
        if not self.results:
            return {"error": "No analysis results available"}

        # Extract key metrics; .get chains tolerate missing sections.
        text_stats = self.results.get('text_analysis', {}).get('basic_statistics', {})
        num_summary = self.results.get('numerical_analysis', {}).get('summary', {})
        num_health = self.results.get('numerical_analysis', {}).get('health_score', 0)

        # Classify analyzed sequences by trend type; the special
        # 'comparative_analysis' entry is not itself a sequence.
        trend_count = 0
        convergent_count = 0
        divergent_count = 0

        for seq_name, seq_analysis in self.results.get('trend_analysis', {}).items():
            if seq_name != 'comparative_analysis' and isinstance(seq_analysis, dict):
                trend_count += 1
                trend_type = seq_analysis.get('trend_type', '')
                if 'convergence' in trend_type:
                    convergent_count += 1
                elif 'divergence' in trend_type:
                    divergent_count += 1

        summary = {
            'file_info': {
                'filename': self.results['file_info'].get('filename', 'N/A'),
                'size_bytes': self.results['file_info'].get('size_bytes', 0),
                'processing_time': self.results['file_info'].get('processing_time', 0)
            },
            'text_metrics': {
                'characters': text_stats.get('total_characters', 0),
                'words': text_stats.get('total_words', 0),
                'sentences': text_stats.get('total_sentences', 0),
                'vocabulary_size': text_stats.get('vocabulary_size', 0),
                'lexical_diversity': text_stats.get('lexical_diversity', 0)
            },
            'numerical_metrics': {
                'total_numbers': num_summary.get('total_numbers_found', 0),
                'valid_numbers': num_summary.get('valid_numbers', 0),
                'health_score': num_health
            },
            'trend_metrics': {
                'sequences_analyzed': trend_count,
                'convergent_sequences': convergent_count,
                'divergent_sequences': divergent_count
            }
        }

        return summary

    def print_summary(self):
        """Print a formatted summary of analysis results to stdout."""
        summary = self.get_summary()

        if 'error' in summary:
            print(summary['error'])
            return

        print("\n" + "="*60)
        print("ANALYSIS SUMMARY")
        print("="*60)

        # File info
        file_info = summary['file_info']
        print(f"\nFile: {file_info['filename']}")
        print(f"Size: {file_info['size_bytes']:,} bytes")
        print(f"Processing time: {file_info['processing_time']:.2f} seconds")

        # Text metrics
        text_metrics = summary['text_metrics']
        print(f"\nTEXT ANALYSIS:")
        print(f"  Characters: {text_metrics['characters']:,}")
        print(f"  Words: {text_metrics['words']:,}")
        print(f"  Sentences: {text_metrics['sentences']:,}")
        print(f"  Vocabulary: {text_metrics['vocabulary_size']:,} unique words")
        print(f"  Lexical diversity: {text_metrics['lexical_diversity']:.4f}")

        # Numerical metrics
        num_metrics = summary['numerical_metrics']
        print(f"\nNUMERICAL ANALYSIS:")
        print(f"  Numbers found: {num_metrics['total_numbers']:,}")
        print(f"  Valid numbers: {num_metrics['valid_numbers']:,}")
        print(f"  Health score: {num_metrics['health_score']:.3f}/1.000")

        # Health assessment banded on the 0..1 health score.
        health = num_metrics['health_score']
        if health >= 0.8:
            print(f"  Status: 🟢 EXCELLENT")
        elif health >= 0.6:
            print(f"  Status: 🟡 GOOD")
        elif health >= 0.4:
            print(f"  Status: 🟠 MODERATE")
        else:
            print(f"  Status: 🔴 POOR")

        # Trend metrics (only shown when sequences were analyzed).
        trend_metrics = summary['trend_metrics']
        if trend_metrics['sequences_analyzed'] > 0:
            print(f"\nTREND ANALYSIS:")
            print(f"  Sequences analyzed: {trend_metrics['sequences_analyzed']}")
            print(f"  Convergent sequences: {trend_metrics['convergent_sequences']}")
            print(f"  Divergent sequences: {trend_metrics['divergent_sequences']}")

        print("\n" + "="*60)

    def save_results(self, output_path: Optional[str] = None) -> str:
        """Save raw analysis results to a JSON file.

        Args:
            output_path: Destination path; defaults to
                ``<output_dir>/<analysis_name>_raw.json``.

        Returns:
            The path the results were written to.

        Raises:
            ValueError: If no analysis has been run yet.
        """
        if not self.results:
            raise ValueError("No analysis results available. Run analyze_file() or analyze_text() first.")

        if output_path is None:
            analysis_name = self.results['file_info'].get('analysis_name', 'analysis_results')
            output_path = os.path.join(self.output_dir, f"{analysis_name}_raw.json")

        import json
        with open(output_path, 'w', encoding='utf-8') as f:
            # default=str stringifies any non-JSON-serializable values
            # (e.g. numpy scalars) rather than failing the dump.
            json.dump(self.results, f, indent=2, default=str)

        print(f"Raw results saved to: {output_path}")
        return output_path

    def run_complete_analysis(self, file_path: str, analysis_name: Optional[str] = None) -> Dict[str, Any]:
        """Run complete analysis pipeline: analyze, report, visualize, save.

        Args:
            file_path: Path to the text file to analyze.
            analysis_name: Optional label used for default output filenames.

        Returns:
            Mapping of output kind to path(s): 'report' and 'raw_data' map
            to file paths; 'visualizations' (when plotting is enabled) maps
            to a list of paths.  (Hence Dict[str, Any], not Dict[str, str].)
        """
        print(f"Starting complete analysis of: {file_path}")

        # Run analysis
        self.analyze_file(file_path, analysis_name)

        # Generate outputs
        outputs: Dict[str, Any] = {}

        # Generate report
        outputs['report'] = self.generate_report()

        # Create visualizations (skipped when disabled in configuration).
        if self.config.generate_plots:
            outputs['visualizations'] = self.create_visualizations()

        # Save raw results
        outputs['raw_data'] = self.save_results()

        # Print summary
        self.print_summary()

        print(f"\nComplete analysis finished. All outputs saved to: {self.output_dir}")

        return outputs
