"""
Document model for document analysis system.

Represents source documents being analyzed with metadata,
content information, and analysis relationships.
"""

import hashlib
import json
import logging
import re
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional


class DocumentType(Enum):
    """Supported document types."""
    MARKDOWN = "markdown"
    TEXT = "text"
    PDF = "pdf"
    WORD = "word"


@dataclass
class Document:
    """Represents a source document for analysis.

    Derived attributes (file metadata, content metrics, checksum) are
    computed automatically in ``__post_init__``; callers only need to
    supply the core attributes.
    """

    # Core attributes
    id: str
    file_path: Path
    file_name: str
    document_type: DocumentType

    # Metadata
    title: Optional[str] = None
    author: Optional[str] = None
    created_date: Optional[datetime] = None
    modified_date: Optional[datetime] = None
    file_size_bytes: int = 0

    # Content
    content: str = ""
    word_count: int = 0
    paragraph_count: int = 0
    heading_count: int = 0
    sentence_count: int = 0

    # Analysis metadata
    analyzed_date: Optional[datetime] = None
    analysis_version: str = "1.0"
    checksum: str = ""

    # Additional metadata
    custom_metadata: Dict[str, Any] = field(default_factory=dict)

    # Relationships (populated from other entities)
    _analysis_result: Optional['AnalysisResult'] = field(default=None, repr=False)

    def __post_init__(self):
        """Normalize inputs and calculate derived attributes."""
        # Accept plain strings for convenience; normalize to Path.
        if isinstance(self.file_path, str):
            self.file_path = Path(self.file_path)

        # Pull size/dates/name/title from the filesystem when possible.
        if self.file_path.exists():
            self._extract_file_info()

        # Word/paragraph/heading/sentence counts from the content.
        if self.content:
            self._calculate_content_metrics()

        # Content integrity checksum (empty content -> empty checksum).
        self._generate_checksum()

    def _extract_file_info(self):
        """Extract file metadata (size, dates, name, title) from the filesystem.

        Failures are logged and swallowed so that document creation never
        fails just because filesystem metadata is unavailable.
        """
        try:
            stat = self.file_path.stat()
            self.file_size_bytes = stat.st_size
            self.modified_date = datetime.fromtimestamp(stat.st_mtime)
            # NOTE(review): st_ctime is inode-change time on Unix and
            # creation time only on Windows — treated as best-effort here.
            self.created_date = datetime.fromtimestamp(stat.st_ctime)

            # Extract file name if not provided
            if not self.file_name:
                self.file_name = self.file_path.name

            # Derive a human-readable title from the file stem if absent,
            # e.g. "my_report-v2" -> "My Report V2".
            if not self.title:
                stem = self.file_path.stem
                self.title = stem.replace('_', ' ').replace('-', ' ').title()

        except OSError as e:  # IOError is an alias of OSError in Python 3
            logging.getLogger(__name__).warning(
                f"Failed to extract file info for {self.file_path}: {e}"
            )

    def _calculate_content_metrics(self):
        """Recalculate all content metrics from the current content.

        Empty content resets every metric to zero; previously, stale
        values from the old content survived an update to empty content.
        """
        if not self.content:
            self.word_count = 0
            self.paragraph_count = 0
            self.heading_count = 0
            self.sentence_count = 0
            return

        # Word count: runs of word characters.
        self.word_count = len(re.findall(r'\b\w+\b', self.content))

        # Paragraphs are separated by blank lines (double newlines).
        paragraphs = [p for p in self.content.split('\n\n') if p.strip()]
        self.paragraph_count = len(paragraphs)

        # Markdown-style headings: lines starting with '#'.
        self.heading_count = sum(
            1 for line in self.content.split('\n')
            if line.strip().startswith('#')
        )

        # Naive sentence detection: split on terminal punctuation runs.
        sentences = re.split(r'[.!?]+', self.content)
        self.sentence_count = len([s for s in sentences if s.strip()])

    def _generate_checksum(self):
        """Refresh the MD5 content checksum (integrity check, not security)."""
        if self.content:
            self.checksum = hashlib.md5(
                self.content.encode('utf-8')
            ).hexdigest()
        else:
            # Reset rather than leave a stale checksum behind after the
            # content is cleared via update_content("").
            self.checksum = ""

    def update_content(self, new_content: str):
        """Update document content and recalculate metrics.

        Also resets ``analyzed_date`` since previous analysis results no
        longer correspond to the content.

        Args:
            new_content: New document content (may be empty).
        """
        self.content = new_content
        self._calculate_content_metrics()
        self._generate_checksum()
        self.analyzed_date = None  # Reset analysis date

    def get_content_preview(self, max_length: int = 200) -> str:
        """Get a preview of document content.

        Args:
            max_length: Maximum length of preview (before the ellipsis).

        Returns:
            First ``max_length`` characters, with "..." appended when the
            content was truncated; empty string for empty content.
        """
        if not self.content:
            return ""

        preview = self.content[:max_length]
        if len(self.content) > max_length:
            preview += "..."
        return preview

    def get_file_extension(self) -> str:
        """Get file extension.

        Returns:
            Lower-cased file extension (with dot), e.g. ".md".
        """
        return self.file_path.suffix.lower()

    def is_supported_type(self) -> bool:
        """Check if document type is supported.

        Returns:
            True if ``document_type`` is a ``DocumentType`` member
            (trivially true for correctly-constructed documents).
        """
        return self.document_type in DocumentType

    def get_size_mb(self) -> float:
        """Get file size in megabytes.

        Returns:
            File size in MB (binary: 1 MB = 1024 * 1024 bytes).
        """
        return self.file_size_bytes / (1024 * 1024)

    def to_dict(self) -> Dict[str, Any]:
        """Convert document to a JSON-serializable dictionary.

        Dates are ISO-8601 strings; the enum becomes its string value.
        The private ``_analysis_result`` relationship is not serialized.

        Returns:
            Dictionary representation.
        """
        return {
            "id": self.id,
            "file_path": str(self.file_path),
            "file_name": self.file_name,
            "document_type": self.document_type.value,
            "title": self.title,
            "author": self.author,
            "created_date": self.created_date.isoformat() if self.created_date else None,
            "modified_date": self.modified_date.isoformat() if self.modified_date else None,
            "file_size_bytes": self.file_size_bytes,
            "content": self.content,
            "word_count": self.word_count,
            "paragraph_count": self.paragraph_count,
            "heading_count": self.heading_count,
            "sentence_count": self.sentence_count,
            "analyzed_date": self.analyzed_date.isoformat() if self.analyzed_date else None,
            "analysis_version": self.analysis_version,
            "checksum": self.checksum,
            "custom_metadata": self.custom_metadata
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'Document':
        """Create document from dictionary (inverse of :meth:`to_dict`).

        The caller's dictionary is left unmodified; conversions are done
        on a shallow copy.

        Args:
            data: Dictionary data.

        Returns:
            Document instance.
        """
        # Work on a copy so the caller's dict is not mutated in place.
        data = dict(data)

        # Convert ISO date strings back to datetime objects
        if data.get("created_date"):
            data["created_date"] = datetime.fromisoformat(data["created_date"])
        if data.get("modified_date"):
            data["modified_date"] = datetime.fromisoformat(data["modified_date"])
        if data.get("analyzed_date"):
            data["analyzed_date"] = datetime.fromisoformat(data["analyzed_date"])

        # Convert document type string to enum
        if isinstance(data.get("document_type"), str):
            data["document_type"] = DocumentType(data["document_type"])

        # Convert file path string to Path object
        if isinstance(data.get("file_path"), str):
            data["file_path"] = Path(data["file_path"])

        return cls(**data)

    @classmethod
    def create_from_file(cls, file_path: Path, document_type: DocumentType = None) -> 'Document':
        """Create document from file.

        Args:
            file_path: Path to document file.
            document_type: Document type (auto-detected from the
                extension if None).

        Returns:
            Document instance with content read from disk.
        """
        file_path = Path(file_path)

        # Auto-detect document type if not provided
        if document_type is None:
            document_type = cls._detect_document_type(file_path)

        # Generate unique ID
        doc_id = cls._generate_document_id(file_path)

        # Read content
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
        except UnicodeDecodeError:
            # Fallback for legacy Chinese-encoded files; raises again if
            # the file is neither UTF-8 nor GBK.
            with open(file_path, 'r', encoding='gbk') as f:
                content = f.read()

        return cls(
            id=doc_id,
            file_path=file_path,
            file_name=file_path.name,
            document_type=document_type,
            content=content
        )

    @staticmethod
    def _detect_document_type(file_path: Path) -> DocumentType:
        """Auto-detect document type from file extension.

        Args:
            file_path: Path to file.

        Returns:
            Detected document type (TEXT for unknown extensions).
        """
        extension = file_path.suffix.lower()

        type_mapping = {
            '.md': DocumentType.MARKDOWN,
            '.markdown': DocumentType.MARKDOWN,
            '.txt': DocumentType.TEXT,
            '.pdf': DocumentType.PDF,
            '.doc': DocumentType.WORD,
            '.docx': DocumentType.WORD
        }

        return type_mapping.get(extension, DocumentType.TEXT)

    @staticmethod
    def _generate_document_id(file_path: Path) -> str:
        """Generate unique document ID from file path.

        Args:
            file_path: Path to file.

        Returns:
            16-hex-char ID derived from the absolute path plus the
            current timestamp (so re-importing the same file yields a
            new ID).
        """
        abs_path = str(file_path.absolute())
        timestamp = datetime.now().isoformat()
        combined = f"{abs_path}_{timestamp}"
        return hashlib.md5(combined.encode('utf-8')).hexdigest()[:16]

    def __str__(self) -> str:
        """String representation."""
        return f"Document(id={self.id}, name={self.file_name}, type={self.document_type.value})"

    def __eq__(self, other) -> bool:
        """Equality comparison based on ID only (content is ignored)."""
        if not isinstance(other, Document):
            return False
        return self.id == other.id

    def __hash__(self) -> int:
        """Hash based on ID, consistent with ``__eq__``."""
        return hash(self.id)


# Imported at module bottom to dodge the circular import with analysis_result.
# Default to None first, then upgrade to the real class when available.
AnalysisResult = None  # fallback placeholder for type hints
try:
    from .analysis_result import AnalysisResult
except ImportError:
    pass