"""
Vector store module using ChromaDB.
"""
import logging
import os
from datetime import datetime
from typing import Any, Dict, List, Optional

import chromadb
from chromadb.config import Settings


class VectorStore:
    """Persistent vector store backed by ChromaDB.

    Two collections are maintained:

    * ``documents``      -- one entry per text chunk, id'd ``<file_id>_chunk_<index>``
    * ``files_metadata`` -- one record per uploaded file (filename, chunk count,
      upload time), used to list/inspect files without scanning every chunk.
    """

    def __init__(self, persist_directory: str = "./chroma_db"):
        """
        Initialize the ChromaDB vector store.

        Args:
            persist_directory: Directory to persist ChromaDB data.
                Created if it does not already exist.
        """
        self.persist_directory = persist_directory
        os.makedirs(persist_directory, exist_ok=True)

        self._logger = logging.getLogger(__name__)

        # Prefer PersistentClient (ChromaDB 0.4.x+); fall back to the legacy
        # duckdb+parquet client for older ChromaDB versions.
        try:
            from chromadb import PersistentClient
            self.client = PersistentClient(path=persist_directory)
        except ImportError:
            self.client = chromadb.Client(
                Settings(
                    chroma_db_impl="duckdb+parquet",
                    persist_directory=persist_directory
                )
            )

        # Collection holding document chunks (text, metadata, embeddings).
        self.collection = self.client.get_or_create_collection(
            name="documents"
        )

        # Collection holding one metadata record per uploaded file.
        self.files_collection = self.client.get_or_create_collection(
            name="files_metadata"
        )

    def add_chunks(
        self,
        file_id: str,
        chunks: List[Dict[str, Any]],
        embeddings: Optional[List[List[float]]] = None,
        filename: Optional[str] = None,
    ):
        """
        Add chunks to the vector store and record file metadata.

        Args:
            file_id: Unique file identifier.
            chunks: Chunk dicts, each with 'text' and 'metadata' keys; the
                metadata must contain 'chunk_index' (used to build chunk IDs).
            embeddings: Optional pre-computed embeddings, parallel to ``chunks``.
                When omitted, ChromaDB's default embedding function (if any)
                embeds the documents.
            filename: Original filename; when given, a file-metadata record is
                upserted as well.
        """
        if not chunks:
            return

        # Deterministic chunk IDs so re-indexing the same file replaces
        # entries in place rather than duplicating them.
        ids = [
            f"{file_id}_chunk_{chunk['metadata']['chunk_index']}"
            for chunk in chunks
        ]
        documents = [chunk["text"] for chunk in chunks]
        metadatas = [chunk["metadata"] for chunk in chunks]

        if embeddings:
            # Use caller-provided embeddings.
            self.collection.add(
                ids=ids,
                documents=documents,
                embeddings=embeddings,
                metadatas=metadatas
            )
        else:
            # ChromaDB will use its default embedding function if configured;
            # otherwise embeddings must be provided externally.
            self.collection.add(
                ids=ids,
                documents=documents,
                metadatas=metadatas
            )

        if filename:
            self.add_file_metadata(file_id, filename, len(chunks))

    def search(
        self,
        query_embeddings: List[List[float]],
        top_k: int = 4,
        where: Optional[Dict] = None
    ) -> Dict:
        """
        Search for chunks similar to the given query embeddings.

        Args:
            query_embeddings: Query embedding vectors.
            top_k: Number of results to return per query.
            where: Optional ChromaDB metadata filter.

        Returns:
            ChromaDB query results dictionary.
        """
        return self.collection.query(
            query_embeddings=query_embeddings,
            n_results=top_k,
            where=where
        )

    def add_file_metadata(self, file_id: str, filename: str, chunk_count: int):
        """
        Add or update the metadata record for a file.

        Args:
            file_id: Unique file identifier.
            filename: Original filename.
            chunk_count: Number of chunks stored for this file.
        """
        upload_time = datetime.now().isoformat()

        # file_id doubles as the document ID; the filename is stored as the
        # document body so it survives even if metadata is dropped.
        self.files_collection.upsert(
            ids=[file_id],
            documents=[filename],
            metadatas=[{
                "file_id": file_id,
                "filename": filename,
                "chunk_count": chunk_count,
                "upload_time": upload_time
            }]
        )

    def _file_exists(self, file_id: str) -> bool:
        """Return True if a metadata record exists for *file_id* (best-effort:
        any backend failure is treated as "not found")."""
        try:
            results = self.files_collection.get(ids=[file_id])
            return bool(results and results.get("ids"))
        except Exception:
            return False

    def delete_file(self, file_id: str) -> bool:
        """
        Delete a file and all its chunks from the vector store.

        Args:
            file_id: Unique file identifier.

        Returns:
            True if the file was found and deleted, False otherwise.
        """
        if not self._file_exists(file_id):
            return False

        # Delete chunks first so a metadata-delete failure never leaves
        # orphaned chunks behind a still-listed file.
        self._delete_chunks_by_file_id(file_id)

        try:
            self.files_collection.delete(ids=[file_id])
            return True
        except Exception as e:
            self._logger.error("Error deleting file metadata for %s: %s", file_id, e)
            return False

    def update_file(
        self,
        file_id: str,
        chunks: List[Dict[str, Any]],
        embeddings: Optional[List[List[float]]] = None,
        filename: Optional[str] = None,
    ) -> bool:
        """
        Update a file by deleting its old chunks and adding new ones.

        Args:
            file_id: Unique file identifier.
            chunks: List of new chunk dictionaries.
            embeddings: Optional pre-computed embeddings for the chunks.
            filename: Original filename (optional).

        Returns:
            True if the file was updated successfully, False if not found.
        """
        if not self._file_exists(file_id):
            return False

        self._delete_chunks_by_file_id(file_id)
        self.add_chunks(file_id, chunks, embeddings, filename)
        return True

    def list_files(self) -> List[Dict]:
        """
        List all files with their metadata, newest upload first.

        Returns:
            List of file metadata dictionaries.
        """
        files_dict = {}

        # Primary source: the dedicated metadata collection.
        try:
            results = self.files_collection.get(include=["metadatas", "documents"])
            if results and results.get("ids"):
                ids = results["ids"]
                metadatas = results.get("metadatas") or []
                documents = results.get("documents") or []

                for i, file_id in enumerate(ids):
                    metadata = metadatas[i] if i < len(metadatas) else {}
                    metadata = metadata or {}
                    filename = documents[i] if i < len(documents) else None
                    if not filename:
                        filename = metadata.get("filename", "unknown")

                    files_dict[file_id] = {
                        "file_id": file_id,
                        "filename": filename,
                        "chunk_count": metadata.get("chunk_count", 0),
                        "upload_time": metadata.get("upload_time", "")
                    }
        except Exception as e:
            self._logger.error("Error loading file metadata: %s", e)

        # Fallback: aggregate from chunk metadata for files that have no
        # metadata record (and backfill the metadata store when it is empty).
        chunk_files = self._aggregate_files_from_chunks(update_metadata=not files_dict)
        for file_id, chunk_data in chunk_files.items():
            files_dict.setdefault(file_id, chunk_data)

        files = list(files_dict.values())
        files.sort(key=lambda x: x.get("upload_time", ""), reverse=True)
        return files

    def get_file_metadata(self, file_id: str) -> Optional[Dict]:
        """
        Get metadata for a specific file.

        Args:
            file_id: Unique file identifier.

        Returns:
            File metadata dictionary, or None if not found.
        """
        try:
            results = self.files_collection.get(
                ids=[file_id], include=["metadatas", "documents"]
            )
            if results and results.get("ids"):
                # ChromaDB may return None for missing include fields; guard
                # both the None value and an empty list before indexing.
                metadatas = results.get("metadatas") or [{}]
                documents = results.get("documents") or [None]
                metadata = (metadatas[0] if metadatas else {}) or {}
                filename = (documents[0] if documents else None) \
                    or metadata.get("filename", "unknown")

                return {
                    "file_id": file_id,
                    "filename": filename,
                    "chunk_count": metadata.get("chunk_count", 0),
                    "upload_time": metadata.get("upload_time", "")
                }
        except Exception as e:
            self._logger.error("Error getting file metadata for %s: %s", file_id, e)

        # Fallback: rebuild from chunk metadata.
        chunk_files = self._aggregate_files_from_chunks()
        return chunk_files.get(file_id)

    def get_collection(self):
        """Get the ChromaDB chunk collection."""
        return self.collection

    def _aggregate_files_from_chunks(self, update_metadata: bool = False) -> Dict[str, Dict]:
        """
        Aggregate per-file information from chunk metadata.

        Args:
            update_metadata: If True, backfill the files_metadata collection
                with the aggregated records.

        Returns:
            Dictionary mapping file_id to a metadata dictionary.
        """
        files: Dict[str, Dict] = {}
        try:
            results = self.collection.get(include=["metadatas"])
            metadatas = (results.get("metadatas") or []) if results else []

            for metadata in metadatas:
                if not metadata:
                    continue
                file_id = metadata.get("file_id")
                if not file_id:
                    continue

                file_entry = files.setdefault(file_id, {
                    "file_id": file_id,
                    "filename": metadata.get("filename", "unknown"),
                    "chunk_count": 0,
                    "upload_time": metadata.get("upload_time", "")
                })

                file_entry["chunk_count"] += 1
                # Prefer a meaningful filename over a placeholder.
                if file_entry["filename"] in (None, "unknown") and metadata.get("filename"):
                    file_entry["filename"] = metadata["filename"]
                # Keep the first upload time seen if none recorded yet.
                if not file_entry["upload_time"] and metadata.get("upload_time"):
                    file_entry["upload_time"] = metadata["upload_time"]
        except Exception as e:
            self._logger.error("Error aggregating files from chunks: %s", e)

        if update_metadata and files:
            for file_id, data in files.items():
                # Backfill is best-effort: one failed upsert must not abort
                # the aggregation result for the caller.
                try:
                    self.files_collection.upsert(
                        ids=[file_id],
                        documents=[data["filename"]],
                        metadatas=[{
                            "file_id": file_id,
                            "filename": data["filename"],
                            "chunk_count": data["chunk_count"],
                            "upload_time": data["upload_time"]
                        }]
                    )
                except Exception as e:
                    self._logger.error(
                        "Error backfilling metadata for %s: %s", file_id, e
                    )

        return files

    def _delete_chunks_by_file_id(self, file_id: str) -> bool:
        """
        Delete all chunks associated with a given file ID.

        Returns:
            True if any delete path succeeded, False otherwise.
        """
        deleted = False

        # Primary approach: metadata filter (single call, backend-side).
        try:
            self.collection.delete(where={"file_id": file_id})
            deleted = True
        except Exception as e:
            self._logger.warning("Metadata-based delete failed for %s: %s", file_id, e)

        # Secondary approach: match the deterministic chunk-ID prefix, which
        # also catches chunks stored without a file_id metadata field.
        try:
            results = self.collection.get()
            if results and results.get("ids"):
                prefix = f"{file_id}_chunk_"
                chunk_ids = [
                    chunk_id for chunk_id in results["ids"]
                    if chunk_id.startswith(prefix)
                ]
                if chunk_ids:
                    self.collection.delete(ids=chunk_ids)
                    deleted = True
        except Exception as e:
            self._logger.warning("ID-prefix delete failed for %s: %s", file_id, e)

        return deleted

