import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from typing import Optional

from flask import Flask, current_app
from sqlalchemy.orm import load_only

# Removed: from configs import dify_config
# Import the local config directly or rely on Flask's current_app.config
# For simplicity in this refactoring, we'll assume Flask app context is available
# and current_app.config will be populated from our local config.py

from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.embedding.retrieval import RetrievalSegments
from core.rag.entities.metadata_entities import MetadataCondition
from core.rag.index_processor.constant.index_type import IndexType
from core.rag.models.document import Document
from core.rag.rerank.rerank_type import RerankMode
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions import db
from models.dataset import ChildChunk, Dataset, DocumentSegment # Assuming these are the refactored models
from models.dataset import Document as DatasetDocument # Alias for clarity
# from services.external_knowledge_service import ExternalDatasetService # Assuming this is out of scope or refactored

# Fallback retrieval configuration used when a dataset carries none of its own:
# plain semantic search, no reranking, two results, no score cutoff.
default_retrieval_model = dict(
    search_method=RetrievalMethod.SEMANTIC_SEARCH.value,
    reranking_enable=False,
    reranking_model=dict(reranking_provider_name="", reranking_model_name=""),
    top_k=2,
    score_threshold_enabled=False,
)


class RetrievalService:
    """Fan-out retrieval over keyword, semantic (vector) and full-text indexes.

    Each enabled search strategy runs in its own worker thread with its own
    Flask app context; results accumulate into a shared list, errors are
    collected per-thread, and all collected errors are re-raised together
    once every search has finished.
    """

    # NOTE(review): external_retrieve() was removed — ExternalDatasetService is
    # out of scope for the standalone refactor.

    @classmethod
    def retrieve(
        cls,
        retrieval_method: str,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float] = 0.0,
        reranking_model: Optional[dict] = None,
        reranking_mode: str = "reranking_model",
        weights: Optional[dict] = None,
        document_ids_filter: Optional[list[str]] = None,
    ):
        """Run every search strategy implied by ``retrieval_method``.

        Args:
            retrieval_method: ``"keyword_search"``, a semantic/full-text
                method, or hybrid (see RetrievalMethod). Hybrid additionally
                reranks the merged result set.
            dataset_id: Primary key of the target dataset.
            query: Raw user query; falsy queries short-circuit to ``[]``.
            top_k: Per-strategy result cap (also the post-rerank cap).
            score_threshold: Minimum similarity score for vector search.
            reranking_model: Reranking provider/model configuration.
            reranking_mode: Strategy name handed to DataPostProcessor.
            weights: Weighted-score settings used by hybrid reranking.
            document_ids_filter: Optional whitelist of document ids.

        Returns:
            list[Document]: Matched RAG documents (possibly reranked).

        Raises:
            ValueError: Aggregated message if any search thread failed.
        """
        if not query:
            return []
        dataset = cls._get_dataset(dataset_id)
        if not dataset:
            return []

        all_documents: list[Document] = []
        exceptions: list[str] = []

        # Pool size is deployment-tunable via app config.
        max_workers = current_app.config.get("RETRIEVAL_SERVICE_EXECUTORS", 2)
        # Resolve the concrete app object once; worker threads need it to push
        # their own app contexts (current_app is only a thread-local proxy).
        flask_app = current_app._get_current_object()

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = []
            if retrieval_method == "keyword_search":
                futures.append(
                    executor.submit(
                        cls.keyword_search,
                        flask_app=flask_app,
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        all_documents=all_documents,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_semantic_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.embedding_search,
                        flask_app=flask_app,
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_fulltext_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.full_text_index_search,
                        flask_app=flask_app,
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            _done, not_done = concurrent.futures.wait(
                futures, timeout=30, return_when=concurrent.futures.ALL_COMPLETED
            )
            # Fix: a timed-out search used to be dropped silently, yielding
            # partial results with no trace. Keep the partial results but leave
            # an operator-visible warning.
            if not_done:
                current_app.logger.warning(
                    "Retrieval timed out: %d search task(s) did not finish within 30s",
                    len(not_done),
                )

        if exceptions:
            # Log the aggregate before raising so failures stay traceable even
            # when the caller swallows the ValueError.
            current_app.logger.error("Retrieval errors: %s", "; ".join(exceptions))
            raise ValueError(";\n".join(exceptions))

        if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
            # Standalone datasets may not carry a tenant id; fall back to a
            # stub so the post-processor still gets a usable value.
            tenant_id_for_post_processor = getattr(dataset, "tenant_id", "standalone_tenant")
            data_post_processor = DataPostProcessor(
                tenant_id_for_post_processor, reranking_mode, reranking_model, weights, False
            )
            all_documents = data_post_processor.invoke(
                query=query,
                documents=all_documents,
                score_threshold=score_threshold,
                top_n=top_k,
            )

        return all_documents

    @classmethod
    def _get_dataset(cls, dataset_id: str) -> Optional[Dataset]:
        """Fetch the Dataset row by primary key, or None if it does not exist."""
        return db.session.query(Dataset).filter(Dataset.id == dataset_id).first()

    @classmethod
    def keyword_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        all_documents: list,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        """Keyword-index search worker; appends hits to ``all_documents``.

        Runs inside its own app context so db access works off the request
        thread. Any failure is recorded in ``exceptions`` instead of being
        raised, so sibling searches can still complete.
        """
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                keyword_processor = Keyword(dataset=dataset)

                documents = keyword_processor.search(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                all_documents.extend(documents)
            except Exception as e:
                exceptions.append(f"Keyword search error: {e}")

    @classmethod
    def embedding_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        """Vector-similarity search worker; appends hits to ``all_documents``.

        Requires the refactored Dataset model to expose ``get_vector_index()``
        returning a vector-DB client; failures (including a missing method)
        are recorded in ``exceptions`` rather than raised.
        """
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                # get_vector_index() is the critical integration point with the
                # refactored Dataset model; fail loudly (into `exceptions`) if
                # it is not wired up yet.
                if not hasattr(dataset, 'get_vector_index') or not callable(dataset.get_vector_index):
                    raise NotImplementedError("Dataset model does not have a get_vector_index method.")

                vector_processor = dataset.get_vector_index()
                if not vector_processor:
                    raise ValueError("Failed to get vector index for dataset.")

                documents = vector_processor.search_by_vector(
                    query,
                    search_type="similarity_score_threshold",  # may vary per vector DB backend
                    top_k=top_k,
                    score_threshold=score_threshold,
                    # NOTE(review): filter shape is backend-specific; confirm
                    # group_id is the right key for the configured vector DB.
                    filter={"group_id": [dataset.id]} if hasattr(dataset, 'id') else None,
                    document_ids_filter=document_ids_filter,
                )

                if documents:
                    # Reranking for single-method search is handled upstream
                    # (hybrid path in retrieve()); just collect hits here.
                    all_documents.extend(documents)
            except Exception as e:
                exceptions.append(f"Embedding search error: {e}")

    @classmethod
    def full_text_index_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        """Full-text search worker; appends hits to ``all_documents``.

        Uses the same vector index client as embedding_search but calls its
        ``search_by_full_text`` method; capability is probed first since not
        every backend supports full-text search. Failures are recorded in
        ``exceptions`` rather than raised.
        """
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                if not hasattr(dataset, 'get_vector_index') or not callable(dataset.get_vector_index):
                    raise NotImplementedError("Dataset model does not have a get_vector_index method for full text search.")

                vector_processor = dataset.get_vector_index()
                if not vector_processor:
                    raise ValueError("Failed to get vector index for dataset for full text search.")

                # Not every vector backend supports full-text queries; probe
                # before calling so the error message is actionable.
                if not hasattr(vector_processor, 'search_by_full_text') or not callable(vector_processor.search_by_full_text):
                    raise NotImplementedError("Vector index does not support search_by_full_text method.")

                documents = vector_processor.search_by_full_text(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                if documents:
                    all_documents.extend(documents)
            except Exception as e:
                exceptions.append(f"Full text search error: {e}")

    @staticmethod
    def escape_query_for_search(query: str) -> str:
        """Escape double quotes so the query is safe inside quoted search syntax."""
        return query.replace("\"", "\\\"")

    @classmethod
    def format_retrieval_documents(cls, documents: list[Document]) -> list[Document]:
        """Enrich retrieved RAG documents with their source-document metadata.

        Fix: the previous annotation promised ``list[RetrievalSegments]`` while
        the body returned the (enriched) input documents; the annotation now
        matches the actual behavior.

        For each document whose metadata carries document_id/segment_id, looks
        up the owning DatasetDocument and its enabled, completed segment, then
        copies document_name/data_source_type/dataset_id into the document's
        metadata. Documents without a resolvable enabled segment are left
        unenriched but still returned.

        Args:
            documents: RAG documents produced by the search workers.

        Returns:
            The same list, with metadata enriched in place; ``[]`` on empty
            input, missing document ids, or any unexpected error.
        """
        if not documents:
            return []
        try:
            document_ids = {
                doc.metadata.get("document_id")
                for doc in documents
                if doc.metadata and "document_id" in doc.metadata
            }
            if not document_ids:
                return []

            # One batched query for every referenced source document; load
            # only the columns this method actually reads.
            dataset_documents_map = {
                doc.id: doc
                for doc in db.session.query(DatasetDocument)
                .filter(DatasetDocument.id.in_(list(document_ids)))
                .options(
                    load_only(
                        DatasetDocument.id,
                        DatasetDocument.doc_form,
                        DatasetDocument.dataset_id,
                        DatasetDocument.name,
                        DatasetDocument.data_source_type,
                        DatasetDocument.doc_metadata,
                    )
                )
                .all()
            }

            processed_segment_ids = set()

            for rag_doc in documents:  # rag_doc is core.rag.models.document.Document
                doc_metadata = rag_doc.metadata
                if not doc_metadata or "document_id" not in doc_metadata or "segment_id" not in doc_metadata:
                    continue

                db_doc_id = doc_metadata["document_id"]
                db_segment_id = doc_metadata["segment_id"]

                # A segment can surface from several indexes; enrich it once.
                if db_segment_id in processed_segment_ids:
                    continue

                dataset_document_model = dataset_documents_map.get(db_doc_id)
                if not dataset_document_model:
                    continue

                segment_model = (
                    db.session.query(DocumentSegment)
                    .filter(
                        DocumentSegment.id == db_segment_id,
                        DocumentSegment.dataset_id == dataset_document_model.dataset_id,
                        DocumentSegment.enabled == True,
                        DocumentSegment.status == "completed",
                    )
                    .first()
                )

                if not segment_model:
                    continue

                # Enrich the RAG document metadata for the final result.
                rag_doc.metadata["document_name"] = dataset_document_model.name
                rag_doc.metadata["data_source_type"] = dataset_document_model.data_source_type
                rag_doc.metadata["dataset_id"] = dataset_document_model.dataset_id

                processed_segment_ids.add(db_segment_id)

            return documents

        except Exception as e:
            # Best-effort enrichment: log and return an empty result rather
            # than failing the whole retrieval call.
            current_app.logger.error(f"Error formatting retrieval documents: {e}")
            return []

