"""Base document loader interface."""

import hashlib
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional


@dataclass
class Document:
    """In-memory representation of a loaded document (or document chunk).

    Attributes:
        content: Raw text content of the document.
        metadata: Arbitrary key/value metadata attached to the document.
        doc_id: Stable identifier; derived from a hash of ``content``
            when not supplied.
        source: Optional identifier of where the document came from.
        created_at: Timezone-aware UTC creation timestamp; set to "now"
            when not supplied.
    """

    content: str
    metadata: Dict[str, Any]
    doc_id: Optional[str] = None
    source: Optional[str] = None
    created_at: Optional[datetime] = None

    def __post_init__(self) -> None:
        """Fill in ``doc_id`` and ``created_at`` when they were omitted."""
        if not self.doc_id:
            # MD5 is used purely as a cheap, stable content fingerprint,
            # not for any security purpose.
            self.doc_id = hashlib.md5(self.content.encode()).hexdigest()

        if not self.created_at:
            # datetime.utcnow() is deprecated (and returns a *naive*
            # datetime); use an aware UTC timestamp instead.
            self.created_at = datetime.now(timezone.utc)

    def to_dict(self) -> Dict[str, Any]:
        """Convert document to a JSON-serializable dictionary.

        Returns:
            Dict with ``doc_id``, ``content``, ``metadata``, ``source``
            and an ISO-8601 string (or None) for ``created_at``.
        """
        return {
            "doc_id": self.doc_id,
            "content": self.content,
            "metadata": self.metadata,
            "source": self.source,
            "created_at": self.created_at.isoformat() if self.created_at else None,
        }


class BaseDocumentLoader(ABC):
    """Abstract base class for document loaders.

    Concrete loaders implement ``load`` / ``load_by_id``; this base class
    provides chunking, preprocessing and metadata-extraction helpers.
    """

    def __init__(
        self,
        chunk_size: int = 1000,
        chunk_overlap: int = 200,
        metadata_fields: Optional[List[str]] = None
    ):
        """Initialize document loader.

        Args:
            chunk_size: Maximum size of text chunks (must be positive).
            chunk_overlap: Overlap between consecutive chunks; must satisfy
                ``0 <= chunk_overlap < chunk_size`` so splitting always
                advances.
            metadata_fields: Keys copied from raw data by
                ``extract_metadata``.

        Raises:
            ValueError: If ``chunk_size`` or ``chunk_overlap`` is out of
                range.
        """
        if chunk_size <= 0:
            raise ValueError("chunk_size must be positive")
        if not 0 <= chunk_overlap < chunk_size:
            # An overlap >= chunk_size would prevent split_text from ever
            # making forward progress.
            raise ValueError(
                "chunk_overlap must be non-negative and smaller than chunk_size"
            )
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.metadata_fields = metadata_fields or []

    @abstractmethod
    async def load(self, **kwargs) -> List["Document"]:
        """Load documents from source.

        Args:
            **kwargs: Source-specific parameters.

        Returns:
            List of documents.
        """

    @abstractmethod
    async def load_by_id(self, doc_id: str) -> Optional["Document"]:
        """Load a specific document by ID.

        Args:
            doc_id: Document ID.

        Returns:
            Document or None if not found.
        """

    def split_text(self, text: str) -> List[str]:
        """Split text into chunks of at most ``chunk_size`` characters.

        Tries to end each chunk at a sentence or paragraph boundary, and
        starts the next chunk ``chunk_overlap`` characters before the end
        of the previous one.

        Args:
            text: Text to split.

        Returns:
            List of non-empty text chunks (a single chunk if the text
            already fits).
        """
        if len(text) <= self.chunk_size:
            return [text]

        chunks: List[str] = []
        start = 0
        text_len = len(text)

        while start < text_len:
            end = start + self.chunk_size

            if end >= text_len:
                # Final chunk: take everything that remains.
                chunks.append(text[start:])
                break

            # Prefer to break at a sentence/paragraph boundary that lies
            # strictly inside the window.
            for delimiter in ['. ', '! ', '? ', '\n\n', '\n']:
                last_delimiter = text.rfind(delimiter, start, end)
                # Require `> start` (not `!= -1`): a delimiter at the very
                # beginning of the window would yield a chunk that is just
                # the delimiter itself.
                if last_delimiter > start:
                    end = last_delimiter + len(delimiter)
                    break

            chunks.append(text[start:end])

            # Step forward with overlap, but always advance by at least one
            # character: the naive `end - chunk_overlap` can move backwards
            # (looping forever / going negative) when a delimiter was found
            # close to `start`.
            start = max(end - self.chunk_overlap, start + 1)

        return chunks

    def create_documents(
        self,
        texts: List[str],
        metadata: Optional[Dict[str, Any]] = None,
        source: Optional[str] = None
    ) -> List["Document"]:
        """Create Document objects from a list of text chunks.

        Args:
            texts: List of text contents.
            metadata: Optional metadata shared by all documents.
            source: Optional source identifier.

        Returns:
            List of Document objects, each tagged with its chunk index and
            the total chunk count.
        """
        base_metadata = metadata or {}
        total = len(texts)

        return [
            Document(
                content=text,
                metadata={
                    **base_metadata,
                    "chunk_index": index,
                    "total_chunks": total,
                },
                source=source,
            )
            for index, text in enumerate(texts)
        ]

    async def preprocess(self, text: str) -> str:
        """Preprocess text before creating documents.

        Normalizes line endings to LF, strips surrounding and per-line
        whitespace, and drops blank lines.

        Args:
            text: Raw text.

        Returns:
            Preprocessed text.
        """
        # Normalize Windows / old-Mac line endings to LF.
        text = text.strip().replace('\r\n', '\n').replace('\r', '\n')

        # Strip per-line whitespace and remove now-empty lines.
        stripped = (line.strip() for line in text.split('\n'))
        return '\n'.join(line for line in stripped if line)

    def extract_metadata(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Extract the configured metadata fields from raw data.

        Args:
            data: Raw data dictionary.

        Returns:
            Dict containing only the keys listed in ``metadata_fields``
            that are actually present in ``data``.
        """
        return {
            field: data[field]
            for field in self.metadata_fields
            if field in data
        }