import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import time
from typing import Optional

from flask import Flask, current_app
import requests
from sqlalchemy.orm import load_only
from extensions.ext_database import db


# Fallback retrieval configuration applied when a dataset defines none of its own.
# NOTE(review): RetrievalMethod is not imported in this chunk — presumably imported
# elsewhere in the file; verify.
default_retrieval_model = {
    "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
    "reranking_enable": False,
    "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
    "top_k": 2,
    "score_threshold_enabled": False,
}
from collections import ChainMap

class IntentRetrievalService:
    """Retrieval service combining a direct Weaviate GraphQL intent search with
    the standard keyword / semantic / full-text retrieval workers."""

    # NOTE(review): hard-coded local endpoint; _get_weavite_url_header() builds the
    # real URL from dify_config — this attribute looks unused/legacy; confirm.
    url = "http://localhost:8080/v1/graphql"
    # GraphQL query template (as a string). Doubled braces are literal braces for
    # str.format; the single-brace {PLACEHOLDERS} are filled in search_by_vector().
    graphql_query = """{{
    Get {{
        {CLASS_NAME}(
        where:{WHERE_CONDITION},
          nearVector:{{
                       vector:[{VECTOR_VALUES}],
                       distance:{DISTANCE}
                    }},
          limit:{LIMIT}
        )
        {{
        _additional{{id distance}}
        {ATTRIBUTES}
        }}
     }}
    }}
    """

    @staticmethod
    def _get_weavite_url_header()->tuple[str,dict]:
        """Build the Weaviate GraphQL endpoint URL and auth headers from config.

        (Name keeps the historical "weavite" spelling because callers use it.)
        """
        endpoint = dify_config.WEAVIATE_ENDPOINT
        headers = {
            "Authorization": f"Bearer {dify_config.WEAVIATE_API_KEY}",
            "Content-Type": "application/json",
        }
        return f"{endpoint}/v1/graphql", headers
    
    @classmethod
    def intent_predict(cls,dataset_id,query:str,attributes:list[str],document_ids_filter)->list[Document]:
        time1 = time.perf_counter()
        tenant_id, class_name,threshold,top_k = cls._get_search_parameters(dataset_id, attributes)
        if dataset_id not in IntentRetrievalService.ds_vector:
            clear_cache("_get_search_parameters",keys=[dataset_id,str(attributes)])
            tenant_id, class_name,threshold,top_k = cls._get_search_parameters(dataset_id, attributes)
            if dataset_id not in IntentRetrievalService.ds_vector:
                raise Exception("dataset_id not found")
        ds_data = IntentRetrievalService.ds_vector[dataset_id]
        if not ds_data:
            return []
        vector,_,_=ds_data
        time1 = time.perf_counter()
        vector_data = vector._embeddings.embed_query(query)
        logging.debug(
            f"embedding query:{(time.perf_counter()-time1)*1000}")
        time1 = time.perf_counter()
        json_data  = IntentRetrievalService.search_by_vector(class_name,vector_data,threshold,top_k,document_ids_filter)
        logging.debug(
            f"search by vector:{(time.perf_counter()-time1)*1000}")
        time1 = time.perf_counter()
        all_documents: list[Document] = []
        if not json_data or "data" not in json_data or "Get" not in json_data["data"]:
            return []
        gets = json_data["data"]["Get"]
        for key, value in gets.items():
            for item in value:
                text = item.pop("text")
                item["score"] = 1 - item["_additional"]["distance"]
                all_documents.append(Document(page_content=text, metadata=item))
        # all_documents = sorted(all_documents, key=lambda x: x.metadata["score"], reverse=True)
        logging.info(f"Intent predict:all_documents[{len(all_documents)}],lantency:{time.perf_counter()-time1}")
        return all_documents

    @classmethod
    @cache_result(keys=["dataset_id", "attributes"],expire=1800)
    def _get_search_parameters(cls, dataset_id, attributes):
        time1 = time.perf_counter()
        # vector_tuple = IntentRetrievalService.ds_vector.get(
        #             str(dataset_id), None)
        # if vector_tuple is None:
        dataset = cls._get_dataset(dataset_id)
        logging.debug(
            f"get dataset:{(time.perf_counter() - time1)*1000}")
        time2 = time.perf_counter()
        if not dataset:
            raise ValueError("dataset not found")

        vector = Vector(dataset=dataset, attributes=attributes)
        
        class_name = None
        if dataset.index_struct_dict and "vector_store" in dataset.index_struct_dict and "class_prefix" in dataset.index_struct_dict["vector_store"]:
            class_name = dataset.index_struct_dict["vector_store"]["class_prefix"]
        if not class_name:
            raise ValueError("class_name not found")
        IntentRetrievalService.ds_vector[dataset_id]=vector,dataset.tenant_id,class_name
        logging.debug(
            f"init vector:{(time.perf_counter()-time2)*1000}")
        threshold = dataset.retrieval_model_dict.get("score_threshold",0.8)
        top_k = dataset.retrieval_model_dict.get("top_k", 30)
        # IntentRetrievalService.ds_vector[str(dataset_id)] = vector, str(dataset.tenant_id),class_name
        # else:
        #     vector, tenant_id,class_name = vector_tuple
        return dataset.tenant_id,class_name,threshold,top_k

    

    @staticmethod
    def search_by_vector(
        CLASS_NAME: str,
        vector: list[float],
        threshold: float,
        top_k: int,
        document_ids_filter: list[str],
        attributes: Optional[list[str]] = None,
    ) -> dict:
        """Run a nearVector GraphQL query against Weaviate and return the raw JSON.

        Args:
            CLASS_NAME: Weaviate class to query.
            vector: query embedding.
            threshold: minimum similarity score, converted to a maximum distance.
            top_k: result limit.
            document_ids_filter: document_id values OR-ed into a where filter.
            attributes: properties to return (defaults to the intent fields).

        Raises:
            Exception: when the HTTP response status is not 200.
        """
        # Avoid a mutable default argument; the previous signature shared one
        # list object across every call.
        if attributes is None:
            attributes = ["cmd_id", "term_id", "document_id", "doc_id", "text"]
        if document_ids_filter:
            # Build the GraphQL where filter as an OR over document_id equality
            # operands. (Avoids the quote-reuse f-string that required 3.12+.)
            operands = ",".join(
                '{path:["document_id"],operator:Equal,valueText:"%s"}' % doc_id
                for doc_id in document_ids_filter
            )
            where_filter = f"{{operator: Or,operands:[{operands}]}}"
        else:
            where_filter = ''
        query = IntentRetrievalService.graphql_query.format_map({
            "CLASS_NAME": CLASS_NAME,
            "LIMIT": top_k,
            "WHERE_CONDITION": where_filter,
            "VECTOR_VALUES": ", ".join(str(x) for x in vector),
            # Weaviate filters by distance, the inverse of the similarity score.
            "DISTANCE": 1.0 - threshold,
            "ATTRIBUTES": " ".join(attributes),
        })
        url, header = IntentRetrievalService._get_weavite_url_header()
        # A timeout keeps a stuck endpoint from hanging the worker forever.
        response = requests.post(url, json={"query": query}, headers=header, timeout=30)
        if response.status_code == 200:
            return response.json()  # success
        raise Exception("Query was not successful", response)

    # --- standard retrieval entry point ---

    @classmethod
    def retrieve(
        cls,
        tenant_id: str,
        retrieval_method: str,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float] = 0.0,
        reranking_model: Optional[dict] = None,
        reranking_mode: str = "reranking_model",
        weights: Optional[dict] = None,
        document_ids_filter: Optional[list[str]] = None,
        attributes: Optional[list[str]] = None,
    ):
        """Fan the requested retrieval methods out on a thread pool and merge results.

        Depending on *retrieval_method*, keyword, semantic and/or full-text
        workers are submitted; each appends into the shared *all_documents*
        list and records failures in *exceptions*. For hybrid search the merged
        hits are reranked by DataPostProcessor before returning.

        Raises:
            ValueError: aggregating every worker error message, if any occurred.
        """
        if not query:
            return []
        # dataset = cls._get_dataset(dataset_id)
        # if not dataset:
        #     return []

        all_documents: list[Document] = []
        exceptions: list[str] = []

        # Optimize multithreading with thread pools
        # type: ignore
        with ThreadPoolExecutor(max_workers=dify_config.RETRIEVAL_SERVICE_EXECUTORS) as executor:
            futures = []
            if retrieval_method == "keyword_search":
                futures.append(
                    executor.submit(
                        cls.keyword_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        all_documents=all_documents,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            if RetrievalMethod.is_support_semantic_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.embedding_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                        attributes=attributes
                    )
                )
            if RetrievalMethod.is_support_fulltext_search(retrieval_method):
                futures.append(
                    executor.submit(
                        cls.full_text_index_search,
                        flask_app=current_app._get_current_object(),  # type: ignore
                        dataset_id=dataset_id,
                        query=query,
                        top_k=top_k,
                        score_threshold=score_threshold,
                        reranking_model=reranking_model,
                        all_documents=all_documents,
                        retrieval_method=retrieval_method,
                        exceptions=exceptions,
                        document_ids_filter=document_ids_filter,
                    )
                )
            # NOTE(review): wait() gives up after 30s, but exiting the executor's
            # context still blocks until running workers finish — confirm the
            # timeout is intended to have an effect.
            concurrent.futures.wait(
                futures, timeout=30, return_when=concurrent.futures.ALL_COMPLETED)

        if exceptions:
            raise ValueError(";\n".join(exceptions))

        if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
            # Hybrid search merges the workers' hits, then reranks them as one list.
            data_post_processor = DataPostProcessor(
                str(tenant_id), reranking_mode, reranking_model, weights, False
            )
            all_documents = data_post_processor.invoke(
                query=query,
                documents=all_documents,
                score_threshold=score_threshold,
                top_n=top_k,
            )

        return all_documents

    @classmethod
    def external_retrieve(cls, dataset_id: str, query: str, external_retrieval_model: Optional[dict] = None):
        dataset = db.session.query(Dataset).filter(
            Dataset.id == dataset_id).first()
        if not dataset:
            return []
        all_documents = ExternalDatasetService.fetch_external_knowledge_retrieval(
            dataset.tenant_id, dataset_id, query, external_retrieval_model or {}
        )
        return all_documents

    @classmethod
    def _get_dataset(cls, dataset_id: str) -> Optional[Dataset]:
        return db.session.query(Dataset).filter(Dataset.id == dataset_id).first()

    @classmethod
    def keyword_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        all_documents: list,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                keyword = Keyword(dataset=dataset)

                documents = keyword.search(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    ds_vector: dict[str, tuple[Vector, str,str]] = {}

    @classmethod
    def embedding_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
        attributes: Optional[list[str]] = None,
    ):
        with flask_app.app_context():
            try:
                time1 = time.perf_counter()
                vector_tuple = IntentRetrievalService.ds_vector.get(
                    str(dataset_id), None)
                if vector_tuple is None:
                    dataset = cls._get_dataset(dataset_id)
                    logging.debug(
                        f"get dataset:{(time.perf_counter() - time1)*1000}")
                    time2 = time.perf_counter()
                    if not dataset:
                        raise ValueError("dataset not found")

                    vector = Vector(dataset=dataset, attributes=attributes)
                    class_name = None
                    if dataset.index_struct_dict and "vector_store" in dataset.index_struct_dict and "class_prefix" in dataset.index_struct_dict["vector_store"]:
                        class_name = dataset.index_struct_dict["vector_store"]["class_prefix"]
                    logging.debug(
                        f"init vector:{(time.perf_counter()-time2)*1000}")
                    IntentRetrievalService.ds_vector[str(
                        dataset_id)] = vector, str(dataset.tenant_id),class_name
                else:
                    vector, tenant_id,class_name = vector_tuple
                logging.debug(
                    f"get vector from map:{(time.perf_counter() - time1)*1000}")
                time1 = time.perf_counter()
                documents = vector.search_by_vector(
                    query,
                    search_type="similarity_score_threshold",
                    top_k=top_k,
                    score_threshold=score_threshold,
                    filter={"group_id": [dataset_id]},
                    document_ids_filter=document_ids_filter,
                )
                logging.debug(
                    f"search_by_vector:{(time.perf_counter() - time1)*1000}")
                time1 = time.perf_counter()
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.SEMANTIC_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(tenant_id), str(
                                RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                        logging.debug(
                            f"Reorder documents:{(time.perf_counter() - time1)*1000}")
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @classmethod
    def full_text_index_search(
        cls,
        flask_app: Flask,
        dataset_id: str,
        query: str,
        top_k: int,
        score_threshold: Optional[float],
        reranking_model: Optional[dict],
        all_documents: list,
        retrieval_method: str,
        exceptions: list,
        document_ids_filter: Optional[list[str]] = None,
        attributes: Optional[list[str]] = None,
    ):
        with flask_app.app_context():
            try:
                dataset = cls._get_dataset(dataset_id)
                if not dataset:
                    raise ValueError("dataset not found")

                vector_processor = Vector(
                    dataset=dataset, attributes=attributes)

                documents = vector_processor.search_by_full_text(
                    cls.escape_query_for_search(query), top_k=top_k, document_ids_filter=document_ids_filter
                )
                if documents:
                    if (
                        reranking_model
                        and reranking_model.get("reranking_model_name")
                        and reranking_model.get("reranking_provider_name")
                        and retrieval_method == RetrievalMethod.FULL_TEXT_SEARCH.value
                    ):
                        data_post_processor = DataPostProcessor(
                            str(dataset.tenant_id), str(
                                RerankMode.RERANKING_MODEL.value), reranking_model, None, False
                        )
                        all_documents.extend(
                            data_post_processor.invoke(
                                query=query,
                                documents=documents,
                                score_threshold=score_threshold,
                                top_n=len(documents),
                            )
                        )
                    else:
                        all_documents.extend(documents)
            except Exception as e:
                exceptions.append(str(e))

    @staticmethod
    def escape_query_for_search(query: str) -> str:
        """Backslash-escape double quotes so *query* is safe inside a quoted string."""
        escaped = query.replace('"', '\\"')
        return escaped

    @classmethod
    def format_retrieval_documents(cls, documents: list[Document]) -> list[RetrievalSegments]:
        """Resolve retrieved Documents into RetrievalSegments records.

        Batch-loads the DatasetDocument rows referenced by the hits, then maps
        each hit back to its DocumentSegment. For parent-child indexes the hit
        is a ChildChunk: children are grouped under their parent segment and
        the segment's score becomes the max child score.

        Raises:
            Exception: any error, re-raised after rolling back the DB session.
        """
        if not documents:
            return []

        try:
            # Collect document IDs referenced by the hits
            document_ids = {doc.metadata.get(
                "document_id") for doc in documents if "document_id" in doc.metadata}
            if not document_ids:
                return []

            # Batch query dataset documents (one query instead of one per hit)
            dataset_documents = {
                doc.id: doc
                for doc in db.session.query(DatasetDocument)
                .filter(DatasetDocument.id.in_(document_ids))
                .options(load_only(DatasetDocument.id, DatasetDocument.doc_form, DatasetDocument.dataset_id))
                .all()
            }

            records = []
            # segment ids already emitted as records (de-duplication)
            include_segment_ids = set()
            # segment id -> {"max_score": float, "child_chunks": [...]}
            segment_child_map = {}

            # Process documents
            for document in documents:
                document_id = document.metadata.get("document_id")
                if document_id not in dataset_documents:
                    continue

                dataset_document = dataset_documents[document_id]
                if not dataset_document:
                    continue

                if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
                    # Handle parent-child documents: the hit points at a child chunk
                    child_index_node_id = document.metadata.get("doc_id")

                    child_chunk = (
                        db.session.query(ChildChunk).filter(
                            ChildChunk.index_node_id == child_index_node_id).first()
                    )

                    if not child_chunk:
                        continue

                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.id == child_chunk.segment_id,
                        )
                        .options(
                            load_only(
                                DocumentSegment.id,
                                DocumentSegment.content,
                                DocumentSegment.answer,
                            )
                        )
                        .first()
                    )

                    if not segment:
                        continue

                    if segment.id not in include_segment_ids:
                        # First child seen for this segment: create its record
                        include_segment_ids.add(segment.id)
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        map_detail = {
                            "max_score": document.metadata.get("score", 0.0),
                            "child_chunks": [child_chunk_detail],
                        }
                        segment_child_map[segment.id] = map_detail
                        record = {
                            "segment": segment,
                        }
                        records.append(record)
                    else:
                        # Additional child for an already-seen segment: append it
                        # and keep the running max score
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
                            "position": child_chunk.position,
                            "score": document.metadata.get("score", 0.0),
                        }
                        segment_child_map[segment.id]["child_chunks"].append(
                            child_chunk_detail)
                        segment_child_map[segment.id]["max_score"] = max(
                            segment_child_map[segment.id]["max_score"], document.metadata.get(
                                "score", 0.0)
                        )
                else:
                    # Handle normal documents: the hit points directly at a segment
                    index_node_id = document.metadata.get("doc_id")
                    if not index_node_id:
                        continue

                    segment = (
                        db.session.query(DocumentSegment)
                        .filter(
                            DocumentSegment.dataset_id == dataset_document.dataset_id,
                            DocumentSegment.enabled == True,
                            DocumentSegment.status == "completed",
                            DocumentSegment.index_node_id == index_node_id,
                        )
                        .first()
                    )

                    if not segment:
                        continue

                    include_segment_ids.add(segment.id)
                    record = {
                        "segment": segment,
                        # type: ignore
                        "score": document.metadata.get("score"),
                    }
                    records.append(record)

            # Add child chunks information to records
            for record in records:
                if record["segment"].id in segment_child_map:
                    record["child_chunks"] = segment_child_map[record["segment"].id].get(
                        "child_chunks")  # type: ignore
                    record["score"] = segment_child_map[record["segment"].id]["max_score"]

            return [RetrievalSegments(**record) for record in records]
        except Exception as e:
            db.session.rollback()
            raise e
