#!/usr/bin/env python3
"""
Data Models and Structures for Fortran Code Commentator

This module contains all the data classes and structures used throughout the system.
Complete models for processing tasks, results, validation, and system health monitoring.
"""

from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple, Any
from dotenv import load_dotenv
import os

# Load environment variables from .env file
load_dotenv()


@dataclass
class GSICodeClassification:
    """GSI code classification structure.

    Groups GSI source files into functional categories; each list
    presumably holds file-path strings assigned to that category
    (see ProcessingTask.file_path) — confirm against the classifier.
    """
    background_grid: List[str] = field(default_factory=list)        # background/grid handling code
    core_analysis: List[str] = field(default_factory=list)          # core analysis routines
    io_interface: List[str] = field(default_factory=list)           # input/output interface code
    observation_processing: List[str] = field(default_factory=list) # observation handling code
    utilities: List[str] = field(default_factory=list)              # shared utility code


@dataclass
class CommentStandard:
    """Structured comment standard templates for Fortran.

    Holds the template strings and tag lists used when generating
    standardized comments for the different Fortran construct kinds.
    """
    subroutine_template: str = ""   # comment template applied to subroutines
    function_template: str = ""     # comment template applied to functions
    module_template: str = ""       # comment template applied to modules
    parameter_tags: List[str] = field(default_factory=list)     # tags for parameter documentation
    return_value_tags: List[str] = field(default_factory=list)  # tags for return-value documentation
    example_tags: List[str] = field(default_factory=list)       # tags for usage examples
    math_notation: str = ""         # notation convention for mathematical expressions



@dataclass
class PromptConfig:
    """Configuration for LLM prompts.

    Combines the generic base prompt with classification-specific text,
    background context, focus areas, and an optional comment standard.
    """
    base_prompt: str = ""               # prompt text shared by all classifications
    classification_specific: str = ""   # extra prompt text for the file's classification
    background_context: str = ""        # domain/background information injected into the prompt
    focus_areas: List[str] = field(default_factory=list)  # topics the prompt should emphasize
    comment_standard: Optional[CommentStandard] = None    # optional comment templates to enforce



@dataclass
class ProcessingTask:
    """Individual file processing task queued for a worker."""
    file_path: str              # path to the Fortran source file to process
    classification: str         # functional category assigned to the file
    line_count: int             # number of lines in the file
    prompt_config: PromptConfig # prompt configuration to use for this file
    priority: int = 0           # scheduling priority (semantics set by the scheduler)



@dataclass
class ProcessingResults:
    """Aggregated results from a batch of processing tasks."""
    successful_files: List[str] = field(default_factory=list)
    # Each entry is a (str, str) pair — presumably (file_path, error message);
    # confirm against the code that populates it.
    failed_files: List[Tuple[str, str]] = field(default_factory=list)
    total_files: int = 0            # total number of files attempted
    processing_time: float = 0.0    # total wall-clock time for the batch
    statistics: Dict[str, Any] = field(default_factory=dict)  # free-form batch statistics



@dataclass
class WorkerStats:
    """Per-worker processing statistics."""
    worker_id: int                          # identifier of the worker these stats describe
    files_processed: int = 0                # number of files completed by this worker
    total_processing_time: float = 0.0      # cumulative time spent processing
    average_file_time: float = 0.0          # mean time per processed file
    current_task: Optional[str] = None      # task currently in progress, None when idle
    last_completed_time: Optional[float] = None  # timestamp of most recent completion, if any
    


@dataclass
class ProgressSnapshot:
    """Point-in-time snapshot of overall processing progress."""
    timestamp: float                    # when the snapshot was taken
    completed_count: int                # tasks finished so far
    total_count: int                    # total tasks in the run
    active_workers: int                 # workers currently busy
    average_completion_rate: float      # average tasks completed per unit time
    estimated_remaining_time: float     # projected time until completion
    current_throughput: float           # instantaneous completion rate



@dataclass
class DiffPatch:
    """Git diff patch representation for a single file."""
    file_path: str          # file the patch applies to
    original_content: str   # file contents before the patch
    patch_content: str      # the diff/patch text itself
    classification: str     # functional category of the file
    is_valid: bool = False  # set True once the patch passes validation
    
    

@dataclass
class ValidationResult:
    """Result of diff validation."""
    is_valid: bool  # overall verdict; errors below explain a failure
    errors: List[str] = field(default_factory=list)           # blocking problems found
    warnings: List[str] = field(default_factory=list)         # non-blocking issues found
    suggested_fixes: List[str] = field(default_factory=list)  # proposed remedies for the errors



@dataclass
class BackupInfo:
    """Enhanced backup information with metadata tracking."""
    backup_path: str        # where the backup copy is stored
    original_path: str      # path of the file that was backed up
    timestamp: str          # when the backup was created (string form)
    file_size: int          # size of the backed-up file in bytes
    checksum: str           # checksum of the backup for integrity verification
    is_valid: bool = True   # cleared if the backup fails verification
    metadata: Dict[str, Any] = field(default_factory=dict)  # free-form extra metadata



@dataclass
class ApplicationResult:
    """Enhanced result of patch application."""
    success: bool                               # whether the patch was applied
    error_message: Optional[str] = None         # failure description when success is False
    backup_info: Optional[BackupInfo] = None    # backup taken before applying, if any
    applied_content: Optional[str] = None       # file contents after application, if available
    syntax_validation_passed: bool = False      # whether post-apply syntax validation succeeded



@dataclass
class PatchError:
    """Detailed patch error information for diagnosis and recovery."""
    error_type: str  # SYNTAX_ERROR, LINE_MISMATCH, CONTEXT_ERROR, etc.
    error_message: str  # human-readable description of the failure
    line_number: Optional[int] = None  # line where the error occurred, if known
    context: str = ""  # surrounding text that helps locate the error
    suggested_fix: Optional[str] = None  # proposed correction, if one was derived
    severity: str = "HIGH"  # HIGH, MEDIUM, LOW - indicates recoverability
    recovery_strategy: Optional[str] = None  # Specific recovery approach



@dataclass
class DiffAttempt:
    """Record of a single diff application attempt (for retry tracking)."""
    attempt_number: int                     # 1-based attempt counter
    patch_content: str                      # patch text used for this attempt
    error: Optional[PatchError] = None      # failure details when the attempt failed
    success: bool = False                   # whether this attempt succeeded
    timestamp: str = ""                     # when the attempt was made (string form)
    backoff_time: float = 0.0  # Time waited before this attempt
    processing_duration: float = 0.0  # Time taken for this attempt



@dataclass
class ConnectionHealth:
    """Track connection health and statistics."""
    is_connected: bool = False
    last_success: Optional[float] = None    # timestamp of last successful request
    last_failure: Optional[float] = None    # timestamp of last failed request
    consecutive_failures: int = 0           # failures since the last success
    total_requests: int = 0
    successful_requests: int = 0
    average_response_time: float = 0.0
    current_load: int = 0  # Number of concurrent requests

    @property
    def success_rate(self) -> float:
        """Success rate as a percentage; 0.0 when no requests were recorded."""
        if self.total_requests == 0:
            return 0.0
        return (self.successful_requests / self.total_requests) * 100.0

    @property
    def is_degraded(self) -> bool:
        """Check if connection is in degraded state.

        Degraded means 3+ consecutive failures, or an observed success
        rate below 50%. The success-rate criterion is only applied once
        at least one request has been recorded: previously a fresh
        tracker (0 requests, success_rate 0.0) was wrongly reported as
        degraded.
        """
        if self.consecutive_failures >= 3:
            return True
        return self.total_requests > 0 and self.success_rate < 50.0



@dataclass
class ModelCapabilities:
    """Track model capabilities and limitations."""
    max_tokens: int = 4096                      # maximum tokens per generated response
    supports_streaming: bool = True             # whether the model supports streamed output
    max_concurrent_requests: int = 4            # concurrent request limit for the model
    estimated_tokens_per_second: float = 50.0   # rough generation throughput estimate
    context_window: int = 8192                  # maximum context size in tokens


@dataclass
class SystemConfiguration:
    """System configuration data model.

    The environment-backed fields (``llm_url``, ``llm_model``) use
    ``field(default_factory=...)`` so the environment is read each time an
    instance is created. Previously ``os.getenv`` ran once at import time,
    so environment changes made after import were silently ignored.
    """
    max_workers: int = 4            # number of parallel workers
    max_file_lines: int = 1500      # upper bound on file size to process, in lines
    # Resolved from the environment at instantiation time, with fallbacks.
    llm_url: str = field(
        default_factory=lambda: os.getenv('OLLAMA_HOST', 'http://60.245.128.27:11434'))
    llm_model: str = field(
        default_factory=lambda: os.getenv(
            'OLLAMA_MODEL', 'hopephoto/qwen3-30b-a3b-instruct_q8:latest'))
    llm_timeout: int = 300          # LLM request timeout (presumably seconds — confirm)
    backup_enabled: bool = True     # create backups before modifying files
    retry_processed: bool = False   # re-process files already handled
    validate_syntax: bool = False   # run syntax validation after patching
    comment_style: str = "doxygen"  # comment style to generate
    log_level: str = "INFO"
    max_retry_attempts: int = 3
    progress_report_interval: int = 30  # interval between progress reports (presumably seconds)
    target_classification: str = "auto"  # Filter files by classification: auto, core_analysis, etc.
