#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# @Project : chat_model 
# @File    : util.py
# @IDE     : PyCharm 
# @Author  :ZH
# @Time    : 2025/1/18 17:28
from typing import List, Optional

from apps import logic, embedding_utils
from apps.models import User, Session
from apps.vector_store_util import MilvusUtil
from core.setting import settings


async def get_document_list(user: User, query: str, session_object: Session) -> List[dict]:
    """Retrieve documents relevant to *query* from the knowledge bases bound to a session.

    For every embedding server still in service, the query is embedded and a
    vector search (limit 3 per server) is run against that server's Milvus
    collection, restricted to the session's knowledge bases.

    :param user: owner of the session and its knowledge bases.
    :param query: the user's question; embedded and used as the search vector.
    :param session_object: chat session whose bound knowledge bases are searched.
    :return: list of document dicts; empty when nothing is bound or found.
    """
    document_list: List[dict] = []
    # Check whether this session has any knowledge bases bound to it.
    all_session_knowledge = await logic.session_knowledge_logic.all_session_knowledge(
        session_id=session_object.id, user=user
    )
    if not all_session_knowledge:
        return document_list
    # Keep only the bound knowledge bases that still exist and belong to this user.
    all_data = await logic.knowledge_logic.all_knowledge_object(**{
        'id__in': [i.knowledge_id for i in all_session_knowledge], 'is_delete': False, 'user_id': user.id
    })
    # Group the knowledge bases by the embedding server they use.
    embedding_session_knowledge: dict = {}
    for knowledge in all_data:
        embedding_session_knowledge.setdefault(knowledge.embedding_server_id, []).append(knowledge)
    # Restrict to embedding servers that are still available (not deleted).
    embedding_server_info_all = await logic.embedding_server_logic.embedding_server_info_all(
        is_delete=False, id__in=list(embedding_session_knowledge.keys())
    )
    for embedding in embedding_server_info_all:
        # Resolve the embedding client class registered for this server type;
        # unknown types are skipped. (Hoisted: the original looked this up 3x.)
        class_name = embedding_utils.CONFIG.get(embedding.server_type)
        if not class_name:
            continue
        embedding_object = getattr(embedding_utils, class_name)(api_key=embedding.api_key)
        # Embed the query first; skip this server when embedding fails.
        query_embedding = await embedding_object.batch_embedding(
            base_url=embedding.server_base_url, text_list=[query]
        )
        if not query_embedding:
            continue
        # TODO: the current version does not persist vectors dynamically.
        table_name = embedding.table_name
        # Create the Milvus client for this server's collection.
        milvus_object = MilvusUtil(
            address=settings.MILVUS_ADDRESS, port=settings.MILVUS_PORT, user=settings.MILVUS_USER,
            password=settings.MILVUS_PASSWORD, db_name=settings.MILVUS_DB_NAME)
        try:
            # Limit the search to knowledge bases served by this embedding server.
            knowledge_id_list = [i.id for i in embedding_session_knowledge[embedding.id]]
            milvus_object.create_client()
            milvus_object.load_collection(collection_name=table_name)
            search_data = milvus_object.search(
                collection_name=table_name, data=query_embedding, output_fields=[
                    'document_id', 'title', 'document', 'knowledge_id', 'file_id'
                ], filter=f'knowledge_id in {knowledge_id_list}', limit=3
            )
            document_list += milvus_object.search_dict(search_object=search_data)
        finally:
            # Always release the Milvus connection, even if the search raised.
            milvus_object.close()

    return document_list


async def document_query(query: str, document_list: Optional[List[dict]] = None) -> str:
    """Build a RAG prompt from *query* plus the retrieved documents.

    :param query: the user's question.
    :param document_list: retrieved document dicts, each expected to carry a
        ``document`` key; when ``None`` or empty the raw query is returned.
    :return: the formatted prompt, or the unmodified query when there is no context.
    """
    prompt = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
Question: {question} 
Context: {context} 
Answer:"""
    if not document_list:
        # No retrieved context: fall back to the plain question.
        return query
    # Number the retrieved snippets so the model can reference them.
    context = '\n'.join(
        f'{index + 1}. {value.get("document")}' for index, value in enumerate(document_list)
    )
    return prompt.format(question=query, context=context)


async def rank(query: str, document: str):
    """Unimplemented placeholder for re-ranking a retrieved document.

    NOTE(review): presumably meant to score *document* against *query* to
    re-rank search results from ``get_document_list`` — confirm the intended
    contract (return type, score range) before implementing.
    """
    pass

