#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@File    ：vector.py
@Author  ：平
@Date    ：2025/9/28 14:21 
"""
from typing import Literal

from langchain_core.documents import Document
from langchain_elasticsearch import ElasticsearchStore, ElasticsearchRetriever

from app.config.config import settings
from app.vector.es_client import es_client
from app.vector.embedding import embeddings

import logging


logger = logging.getLogger(__name__)


# Text/metadata fields targeted by BM25 multi_match queries.  Shared by the
# "hybrid" and "bm25" strategies so the two field lists cannot drift apart.
_SEARCH_FIELDS: tuple[str, ...] = (
    "text",
    "metadata.title",
    "metadata.description",
    "metadata.author.keyword",
    "metadata.publisher.keyword",
    "metadata.publishYear.keyword",
    "metadata.category",
    "metadata.tags",
    "metadata.summary",
)


def _multi_match_clause(query, boost=None):
    """Build the BM25 ``multi_match`` clause shared by keyword strategies.

    Args:
        query: raw user query string.
        boost: optional relevance boost (float); omitted from the clause
            when None, matching the plain-BM25 strategy.

    Returns:
        dict suitable as the ``query`` part of an ES search body.
    """
    clause = {
        "multi_match": {
            "query": query,
            # Fresh list per call so callers may mutate the result safely.
            "fields": list(_SEARCH_FIELDS),
            "type": "best_fields",
            "fuzziness": "AUTO",
        }
    }
    if boost is not None:
        clause["multi_match"]["boost"] = boost
    return clause


class VectorStore:
    """Elasticsearch-backed vector store supporting hybrid / dense-vector /
    BM25 retrieval over book-chunk documents."""

    # Maps a strategy name to a ``custom_query`` callable consumed by
    # langchain's ElasticsearchStore.  Each callable receives the query body
    # langchain built (``qb``, containing the ``knn`` clause with the query
    # embedding) and the raw query string (``q``), and returns the final ES
    # request body.
    SEARCH_CUSTOM_QUERY_MAPPING = {
        # kNN (boost 0.6) blended with BM25 multi_match (boost 0.4).
        "hybrid": lambda qb, q: {
            "knn": {
                **qb.get('knn', {}),
                # Widen the ANN candidate pool: at least 100, or 10x k.
                'num_candidates': max(100, qb.get('knn', {}).get('k', 10) * 10),
                'boost': 0.6,
            },
            "query": _multi_match_clause(q, boost=0.4)["multi_match"] and _multi_match_clause(q, boost=0.4),
        },
        # Pure dense-vector kNN retrieval.
        "vector": lambda qb, q: {
            "knn": {
                **qb.get('knn', {}),
                'num_candidates': max(100, qb.get('knn', {}).get('k', 10) * 10),
            }
        },
        # Pure keyword (BM25) retrieval; langchain still computes the query
        # embedding, but this body ignores it.
        "bm25": lambda qb, q: _multi_match_clause(q) and {
            "query": _multi_match_clause(q),
        },
    }

    def __init__(self):
        self.es_client = es_client
        # LangChain wrapper handling embedding + indexing against ES.
        self.evs = ElasticsearchStore(
            settings.ELASTICSEARCH_INDEX_NAME,
            embedding=embeddings,
            es_connection=es_client,
        )

    def store(self, docs: list[Document], batch_size: int = 64) -> bool:
        """Index documents in batches.

        Args:
            docs: documents to index.
            batch_size: number of documents per bulk request; must be > 0.

        Returns:
            True if every batch was indexed successfully (vacuously True for
            empty ``docs``), False if any batch failed.

        Raises:
            ValueError: if ``batch_size`` is not positive.
        """
        if batch_size <= 0:
            # A zero step would raise deep inside range(); a negative one
            # would silently index nothing while still reporting success.
            raise ValueError(f"batch_size must be positive, got {batch_size}")
        total, success = 0, 0
        for start in range(0, len(docs), batch_size):
            batch_no = start // batch_size + 1
            total += 1
            try:
                self.evs.add_documents(docs[start:start + batch_size])
                success += 1
                logger.info("已存储第 %s 批数据", batch_no)
            except Exception as e:
                # Best-effort: keep indexing the remaining batches and report
                # the overall outcome through the return value.
                logger.exception("存储第%s批数据失败:%s", batch_no, e)
        return success == total

    def search(
            self,
            query: str,
            k: int = 10,
            search_type: Literal["hybrid", "vector", "bm25"] = "hybrid",
    ) -> list[tuple[Document, float]]:
        """Retrieve documents with the chosen strategy.

        Args:
            query: query text.
            k: number of hits to return.
            search_type: one of "hybrid", "vector" or "bm25".

        Returns:
            List of (document, score) tuples.

        Raises:
            KeyError: if ``search_type`` is not a known strategy.
        """
        return self.evs.similarity_search_with_score(
            query,
            k=k,
            custom_query=VectorStore.SEARCH_CUSTOM_QUERY_MAPPING[search_type],
        )

    def remove(self, book_id: int) -> bool:
        """Delete every chunk belonging to one book.

        Args:
            book_id: book id stored in ``metadata.id``.

        Returns:
            True if at least one document was deleted with no per-document
            failures; False otherwise (nothing matched, partial failure,
            or transport error).
        """
        query = {
            "query": {
                "term": {
                    "metadata.id": book_id
                }
            }
        }
        try:
            response = self.es_client.delete_by_query(
                index=settings.ELASTICSEARCH_INDEX_NAME,
                body=query,
                refresh=True,  # make the deletion visible to search immediately
            )
        except Exception as e:
            logger.exception("删除数据失败:%s", e)
            return False
        deleted_count = response.get('deleted', 0)
        failures = response.get('failures', [])
        if deleted_count > 0 and not failures:
            logger.info("成功删除 %s 条数据", deleted_count)
            return True
        logger.warning("删除操作未完成预期效果，删除了 %s 条数据，失败项: %s", deleted_count, failures)
        return False



# Shared module-level singleton.  NOTE(review): constructing it here makes an
# import of this module instantiate ElasticsearchStore (an import-time side
# effect) — consider lazy initialization if import-time ES setup is undesirable.
vectorstore: VectorStore = VectorStore()

if __name__ == '__main__':
    # Ad-hoc developer smoke test, intentionally disabled.  The commented-out
    # code below exercised PDF ingestion (via a Pipline helper) followed by a
    # hybrid search; it needs imports (Path, json, Pipline) and a local PDF
    # that are not part of this module.
    pass
    # current_file = Path(__file__)
    # pdf_path = current_file.parent.parent.parent / "cache" / "SpringBoot揭秘+快速构建微服务体系+(王福强著)+(Z-Library).pdf"
    # if pdf_path.exists():
    #     p = Pipline(pdf_path, Pipline.FileSuffix.PDF,
    #                 metadata={
    #                     "id": 123456,
    #                     "title": "基于LangChain与Elasticsearch的RAG应用搭建深度研究报告",
    #                     "author": "qwen"
    #                 })
    #     print(p.load().split().get()[1])
    #     vectorstore.store(p.load().split().get()[1])
    # for doc, score in vectorstore.search("RAG", k=10, search_type="hybrid"):
    #     print(json.dumps({"text": doc.page_content, "metadata": doc.metadata, "score": score}, ensure_ascii=False,
    #                      indent=4))
