from qanything_kernel.configs.model_config import VECTOR_SEARCH_TOP_K, CHUNK_SIZE, VECTOR_SEARCH_SCORE_THRESHOLD, \
    PROMPT_TEMPLATE, STREAMING, PROMPT_TEMPLTE_Q_COMMEND, PROMPT_TEMPLATE_QUERY_REWRITE
from typing import List
from qanything_kernel.connector.embedding.embedding_for_online import YouDaoEmbeddings
from qanything_kernel.connector.embedding.embedding_for_local import YouDaoLocalEmbeddings
import time
from qanything_kernel.connector.llm import OpenAILLM, ZiyueLLM
from langchain.schema import Document
from qanything_kernel.connector.database.mysql.mysql_client import KnowledgeBaseManager
from qanything_kernel.connector.database.milvus.milvus_client import MilvusClient
from qanything_kernel.connector.database.milvus.qa_milvus_client import QAMilvusClient
from qanything_kernel.utils.custom_log import debug_logger, qa_logger
from .local_file import LocalFile
from qanything_kernel.utils.general_utils import get_time
import requests
import traceback
import logging
from qanything_kernel.connector.database.ES.es_client import ElasticsearchClient 
from qanything_kernel.connector.llm.base import (BaseAnswer,
                                                 AnswerResult)

import json
from scipy.spatial.distance import cosine
import numpy as np

# LRS
from qanything_kernel.dependent_server.llm_for_local_serve.llm_server_baichuan import BaiChuanLLM
from qanything_kernel.dependent_server.llm_api_serve.glm4_serve import ZhipuAILLM
from qanything_kernel.dependent_server.llm_api_serve.ernie_commend_serve import BaiduLLM
# from qanything_kernel.dependent_server.llm_api_serve.ollama_qwen import QwenLLM

from concurrent.futures import ThreadPoolExecutor

# Configure the root logger at INFO so module-level loggers emit by default.
logging.basicConfig(level=logging.INFO)

def _embeddings_hash(self):
    return hash(self.model_name)


# Monkey-patch __hash__ so embedding instances hash by model_name —
# presumably so langchain can use them as cache/dict keys; TODO confirm.
YouDaoLocalEmbeddings.__hash__ = _embeddings_hash
YouDaoEmbeddings.__hash__ = _embeddings_hash


class LocalDocQA:
    """Local knowledge-base question-answering orchestrator.

    Wires together embeddings, pluggable LLM backends, Milvus vector
    stores (document chunks plus a QA-pair collection), an Elasticsearch
    keyword index, a MySQL metadata manager (``KnowledgeBaseManager``)
    and external OCR / rerank HTTP services, to ingest user files and
    answer questions over them.
    """

    def __init__(self):
        # Backends are populated lazily by init_cfg(); per-KB clients are
        # cached in the *_kbs lists by the match_* helpers.
        self.llm: object = None
        self.embeddings: object = None
        self.top_k: int = VECTOR_SEARCH_TOP_K
        self.chunk_size: int = CHUNK_SIZE
        self.chunk_conent: bool = True
        self.score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
        self.milvus_kbs: List[MilvusClient] = []
        self.qa_milvus_kbs: List[QAMilvusClient] = []
        self.milvus_summary: KnowledgeBaseManager = None
        self.mode: str = None
        self.es_kbs: List[ElasticsearchClient] = []
        self.local_rerank_service_url = "http://0.0.0.0:8776"
        self.ocr_url = 'http://0.0.0.0:8010/ocr'
        # Single worker: recommended-question generation is submitted one task at a time.
        self.executor = ThreadPoolExecutor(max_workers=1)

    def get_ocr_result(self, image_data: dict):
        """POST an image payload to the OCR service and return its 'results' field."""
        response = requests.post(self.ocr_url, json=image_data)
        response.raise_for_status()  # raises if the request returned an error status code
        return response.json()['results']

    def init_cfg(self, mode='local'):
        """Initialise embeddings, LLM backend(s) and the MySQL metadata manager.

        mode == 'local' selects the Zhipu GLM-4 API (plus a Baidu LLM kept
        around for question recommendation); any other mode uses OpenAI.
        """
        self.mode = mode
        self.embeddings = YouDaoLocalEmbeddings()
        if self.mode == 'local':
            # self.llm: ZiyueLLM = ZiyueLLM()  # default Qwen 7B
            # self.llm: baichuan = BaiChuanLLM()  # baichuan2-13B-Chat

            # Call Zhipu GLM-4
            self.llm: ZhipuAILLM = ZhipuAILLM()
            # self.llm: QwenLLM = QwenLLM()
            self.baidu_llm: BaiduLLM = BaiduLLM()
        else:
            self.llm: OpenAILLM = OpenAILLM()
        self.milvus_summary = KnowledgeBaseManager(self.mode)

    def create_milvus_collection(self, user_id, kb_id, kb_name, use_qa_pair=False):
        """Create and cache Milvus collection(s) for a knowledge base, then
        register the KB in the MySQL metadata store."""
        milvus_kb = MilvusClient(self.mode, user_id, [kb_id])
        self.milvus_kbs.append(milvus_kb)

        # LRS: extra knowledge base holding QA pairs, namespaced by a 'qa' user-id prefix
        if use_qa_pair:
            qa_user_id = 'qa' + user_id
            qa_kb_id = kb_id
            qa_milvus_kb = QAMilvusClient(self.mode, qa_user_id, [qa_kb_id])
            self.qa_milvus_kbs.append(qa_milvus_kb)

        self.milvus_summary.new_milvus_base(kb_id, user_id, kb_name)

    def create_es_index(self, user_id, kb_id, kb_name):
        """Create an Elasticsearch index for the KB and cache the client."""
        es_kb = ElasticsearchClient(kb_id=kb_id)
        # create the index
        es_kb.create_index(index_name=kb_id)
        self.es_kbs.append(es_kb)

    def match_milvus_kb(self, user_id, kb_ids):
        """Return a cached MilvusClient for (user_id, kb_ids); create and cache one on miss."""
        for kb in self.milvus_kbs:
            if user_id == kb.user_id and kb_ids == kb.kb_ids:
                debug_logger.info(f'match milvus_client: {kb}')
                return kb
        milvus_kb = MilvusClient(self.mode, user_id, kb_ids)
        self.milvus_kbs.append(milvus_kb)
        return milvus_kb

    def match_qa_milvus_kb(self, user_id, kb_ids):
        """Return a cached QAMilvusClient for (user_id, kb_ids); create and cache one on miss."""
        for kb in self.qa_milvus_kbs:
            if user_id == kb.user_id and kb_ids == kb.kb_ids:
                debug_logger.info(f'match milvus_client: {kb}')
                return kb
        qa_milvus_kb = QAMilvusClient(self.mode, user_id, kb_ids)
        self.qa_milvus_kbs.append(qa_milvus_kb)
        return qa_milvus_kb

    def match_es_kb(self, user_id, kb_id):
        """Return a cached ElasticsearchClient for kb_id; create the client
        (and its index) and cache it on miss."""
        for kb in self.es_kbs:
            if kb_id == kb.kb_id:
                debug_logger.info(f'match es_client: {kb}')
                return kb
        es_kb = ElasticsearchClient(kb_id=kb_id)
        # create the index
        es_kb.create_index(index_name=kb_id)
        self.es_kbs.append(es_kb)
        return es_kb

    async def insert_files_to_milvus(self, user_id, kb_id, local_files: List[LocalFile], use_qa_pair=False):
        """Split, embed and insert files into Milvus and Elasticsearch.

        Per-file status is written back to MySQL: 'red' when splitting or
        embedding fails, 'green' on full success, 'yellow' when the
        vector/ES insert partially fails.
        """
        debug_logger.info(f'insert_files_to_milvus: {kb_id}')

        milvus_kv = self.match_milvus_kb(user_id, [kb_id])

        # NOTE(review): the QA-pair client is created unconditionally — the
        # 'if use_qa_pair' guard below is commented out; confirm intended.
        # if use_qa_pair:
        qa_user_id = 'qa' + user_id
        qa_kb_id = kb_id
        qa_milvus_kv = self.match_qa_milvus_kb(qa_user_id, [qa_kb_id])

        es_kv = self.match_es_kb(user_id, kb_id)

        assert milvus_kv is not None
        assert es_kv is not None
        assert qa_milvus_kv is not None

        success_list = []
        failed_list = []

        for local_file in local_files:
            start = time.time()
            try:
                # Split the file into document chunks (OCR callback handles images).
                local_file.split_file_to_docs(self.get_ocr_result)
                content_length = sum([len(doc.page_content) for doc in local_file.docs])
            except Exception as e:
                error_info = f'split error: {traceback.format_exc()}'
                debug_logger.error(error_info)
                self.milvus_summary.update_file_status(local_file.file_id, status='red')
                failed_list.append(local_file)
                continue
            end = time.time()
            self.milvus_summary.update_content_length(local_file.file_id, content_length)
            debug_logger.info(f'split time: {end - start} {len(local_file.docs)}')

            start = time.time()
            try:
                local_file.create_embedding()
            except Exception as e:
                error_info = f'embedding error: {traceback.format_exc()}'
                debug_logger.error(error_info)
                self.milvus_summary.update_file_status(local_file.file_id, status='red')
                failed_list.append(local_file)
                continue
            end = time.time()
            debug_logger.info(f'embedding time: {end - start} {len(local_file.embs)}')

            if use_qa_pair:
                # NOTE(review): 'strat'/'strart' below look like typos for
                # 'start'; the timing logs in this branch still read the stale
                # 'start' from the embedding step, so the reported QA
                # durations are wrong — confirm and fix separately.
                strat = time.time()
                try:
                    local_file.qa_pair_file_to_docs()
                except Exception as e:
                    error_info = f'split qa pair error: {traceback.format_exc()}'
                    debug_logger.error(error_info)
                    continue
                end = time.time()
                debug_logger.info(f'split qa pair time: {end - start} {len(local_file.qa_docs)}')

                strart = time.time()
                try:
                    local_file.create_qa_embedding()
                except Exception as e:
                    error_info = f'qa embedding error: {traceback.format_exc()}'
                    debug_logger.error(error_info)
                    continue
                end = time.time()
                debug_logger.info(f'qa embeddings time: {end - start} {len(local_file.qa_embs)}')

            self.milvus_summary.update_chunk_size(local_file.file_id, len(local_file.docs))

            # Insert data into the vector store and the document (ES) store
            ret = await milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
                                               local_file.docs, local_file.embs)

            if use_qa_pair:
                # NOTE(review): qa_ret is not included in the success check
                # below — a failed QA-pair insert still reports 'green'.
                qa_ret = await qa_milvus_kv.insert_files(local_file.file_id, local_file.file_name, local_file.file_path,
                                                local_file.qa_docs, local_file.qa_embs)

            es_ret = await es_kv.import_documents(file_id=local_file.file_id, index_name=kb_id, documents=local_file.docs)

            insert_time = time.time()
            debug_logger.info(f'insert time: {insert_time - end}')
            if ret and es_ret:
                self.milvus_summary.update_file_status(local_file.file_id, status='green')
                success_list.append(local_file)
            else:
                self.milvus_summary.update_file_status(local_file.file_id, status='yellow')
                failed_list.append(local_file)
        debug_logger.info(
            f"insert_to_milvus: success num: {len(success_list)}, failed num: {len(failed_list)}")

    def deduplicate_documents(self, source_docs):
        """Drop documents whose page_content duplicates an earlier one (order preserved)."""
        unique_docs = set()
        deduplicated_docs = []
        for doc in source_docs:
            if doc.page_content not in unique_docs:
                unique_docs.add(doc.page_content)
                deduplicated_docs.append(doc)
        return deduplicated_docs

    def get_source_documents(self, queries, milvus_kb, es_kb, cosine_thresh=None, top_k=None):
        """Hybrid retrieval: Milvus vector search plus Elasticsearch keyword search.

        Results are deduplicated, expanded to neighbouring chunks, and tagged
        with the retrieval query and embedding version. When cosine_thresh is
        given, documents with score <= cosine_thresh are filtered out.
        """
        milvus_kb: MilvusClient
        es_kb: ElasticsearchClient
        source_documents = []
        es_results = []
        if not top_k:
            top_k = self.top_k

        embs = self.embeddings._get_len_safe_embeddings(queries)

        t1 = time.time()
        embs_result = milvus_kb.search_emb_async(embs=embs, top_k=top_k)

        # Only the first query is used for the keyword (ES) leg.
        index_name = es_kb.kb_id
        es_results = es_kb.search_documents(index_name, queries[0])  # top_k = 5

        source_chunks = embs_result + es_results

        # Deduplicate the retrieved document chunks
        deduplicated_chunk = self.deduplicate_documents(source_chunks)

        # Group chunks by their source document and recall surrounding chunks
        recall_documents = milvus_kb.expand_cand_docs(deduplicated_chunk)

        t2 = time.time()
        debug_logger.info(f"milvus search time: {t2 - t1}")

        for doc in recall_documents:
            doc.metadata['retrieval_query'] = queries[0]  # record the query in the document metadata
            doc.metadata['embed_version'] = self.embeddings.embed_version
            source_documents.append(doc)

        if cosine_thresh:
            source_documents = [item for item in source_documents if float(item.metadata['score']) > cosine_thresh]

        return source_documents

    def reprocess_source_documents(self, query: str,
                                   source_docs: List[Document],
                                   history: List[str],
                                   prompt_template: str) -> List[Document]:
        """Trim source_docs so the assembled prompt fits the LLM token window.

        Docs are kept whole while they fit the remaining budget; the first doc
        that does not fit is truncated from both ends until it does (or emptied),
        and iteration stops there.
        """
        # Compute the remaining token budget from the model's window limits.
        query_token_num = self.llm.num_tokens_from_messages([query])
        history_token_num = self.llm.num_tokens_from_messages([x for sublist in history for x in sublist])
        template_token_num = self.llm.num_tokens_from_messages([prompt_template])

        # logging.info(f"<self.llm.token_window, self.llm.max_token, self.llm.offcut_token, query_token_num, history_token_num, template_token_num>, types = {type(self.llm.token_window), type(self.llm.max_token), type(self.llm.offcut_token), type(query_token_num), type(history_token_num), type(template_token_num)}, values = {query_token_num, history_token_num, template_token_num}")
        limited_token_nums = self.llm.token_window - self.llm.max_token - self.llm.offcut_token - query_token_num - history_token_num - template_token_num
        new_source_docs = []
        total_token_num = 0
        for doc in source_docs:
            doc_token_num = self.llm.num_tokens_from_docs([doc])
            if total_token_num + doc_token_num <= limited_token_nums:
                new_source_docs.append(doc)
                total_token_num += doc_token_num
            else:
                remaining_token_num = limited_token_nums - total_token_num
                doc_content = doc.page_content
                doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
                while doc_content_token_num > remaining_token_num:
                    # Truncate the doc content to fit the remaining tokens
                    if len(doc_content) > 2 * self.llm.truncate_len:
                        doc_content = doc_content[self.llm.truncate_len: -self.llm.truncate_len]
                    else:  # shorter than 2x truncate_len: cannot trim further, drop the content entirely
                        doc_content = ""
                        break
                    doc_content_token_num = self.llm.num_tokens_from_messages([doc_content])
                doc.page_content = doc_content
                new_source_docs.append(doc)
                break

        debug_logger.info(f"limited token nums: {limited_token_nums}")
        debug_logger.info(f"template token nums: {template_token_num}")
        debug_logger.info(f"query token nums: {query_token_num}")
        debug_logger.info(f"history token nums: {history_token_num}")
        debug_logger.info(f"new_source_docs token nums: {self.llm.num_tokens_from_docs(new_source_docs)}")
        return new_source_docs

    def generate_prompt(self, query, source_docs, prompt_template):
        """Fill the {question} and {context} placeholders of the prompt template."""
        context = "\n".join([doc.page_content for doc in source_docs])
        prompt = prompt_template.replace("{question}", query).replace("{context}", context)
        return prompt

    def rerank_documents(self, query, source_documents):
        """Rerank via the local rerank HTTP service."""
        return self.rerank_documents_for_local(query, source_documents)

    def rerank_documents_for_local(self, query, source_documents):
        """Score documents against the query using the local rerank service and
        sort them by score (descending). Falls back to the input order on error."""
        if len(query) > 300:  # skip local rerank for long queries (original comment said tokens; check is on characters)
            return source_documents
        try:
            response = requests.post(f"{self.local_rerank_service_url}/rerank",
                                     json={"passages": [doc.page_content for doc in source_documents], "query": query},)
            scores = response.json()
            for idx, score in enumerate(scores):
                source_documents[idx].metadata['score'] = score

            source_documents = sorted(source_documents, key=lambda x: x.metadata['score'], reverse=True)
        except Exception as e:
            debug_logger.error("rerank error: %s", traceback.format_exc())
            debug_logger.warning("rerank error, use origin retrieval docs")

        return source_documents


    # Build the response payload yielded to callers
    def create_response(self, query, result, retrieval_documents, source_documents, history):
        """Assemble the standard response dict (prompt left empty here)."""
        return {
            "query": query,
            "prompt": "",
            "result": result,
            "retrieval_documents": retrieval_documents,
            "source_documents": source_documents,
            "relation_questions": []
        }

    def similarity_cosine(self, a_embs, b_embs):
        """Cosine similarity (1 - cosine distance) between two embedding vectors."""
        # the two vectors
        vector_a = np.array(a_embs)
        vector_b = np.array(b_embs)

        # cosine distance
        cosine_distance = cosine(vector_a, vector_b)

        # Convert the distance to a similarity score in [0, 1]; closer to 1 means more similar
        similarity_score = 1 - cosine_distance

        return similarity_score

    # Generate user-preference follow-up questions with the LLM
    async def llm_question_gen(self, question, documents):
        """Async variant: ask the LLM for related questions over the retrieved
        context; returns at most 3, or [] when the JSON reply cannot be parsed."""
        context = ''
        for doc in documents:
            context += doc.page_content + '\n'
        # Baidu API LLM (disabled)
        # rsp = self.baidu_llm.chat(PROMPT_TEMPLTE_Q_COMMEND.format(query=question, context=context))

        # GLM glm-3-turbo
        rsp = await self.llm.non_stream_chat(prompt=PROMPT_TEMPLTE_Q_COMMEND.format(query=question, context=context))

        try:
            # Strip the markdown code fence before parsing the JSON list.
            gen_questions = [q.get('question') for q in json.loads(rsp.replace("```json", "").replace("```", "").strip())]
            gen_questions = gen_questions[0:3]
        except Exception as e:
            debug_logger.info("JSON DATA PARSER IS ERROR!!!")
            gen_questions = []
        return gen_questions

    def llm_question_generation(self, question, documents):
        """Sync variant of llm_question_gen (run from the thread pool executor)."""
        context = ''
        for doc in documents:
            context += doc.page_content + '\n'
        # Baidu API LLM (disabled)
        # rsp = self.baidu_llm.chat(PROMPT_TEMPLTE_Q_COMMEND.format(query=question, context=context))

        # GLM glm-3-turbo
        rsp = self.llm.chat(prompt=PROMPT_TEMPLTE_Q_COMMEND.format(query=question, context=context))

        try:
            # Strip the markdown code fence before parsing the JSON list.
            gen_questions = [q.get('question') for q in json.loads(rsp.replace("```json", "").replace("```", "").strip())]
            gen_questions = gen_questions[0:3]
        except Exception as e:
            debug_logger.info("JSON DATA PARSER IS ERROR!!!")
            gen_questions = []
        return gen_questions

    # Rewrite the user's question using the multi-turn conversation history
    async def llm_query_rewrite(self, query, history):
        """Build a numbered transcript of past questions plus the new query,
        then ask the LLM to rewrite the query as a standalone question.

        history is expected to be an iterable of (question, answer) pairs —
        presumably; verify against callers.
        """
        re_query = ''
        try:
            for i, (his_q, his_a) in enumerate(history):
                re_query += f'第{i+1}轮：' + his_q + '\n'
        except Exception as e:
            # NOTE(review): the extra positional arg is never rendered — the
            # message has no %s placeholder.
            debug_logger.info('传入的历史信息格式错误！', str(e))

        re_query += f'第{len(history)+1}轮：' + query    
        prompt = PROMPT_TEMPLATE_QUERY_REWRITE.format(query=re_query)
        response = await self.llm.non_stream_chat(prompt=prompt)
        return response
        

    @get_time
    async def get_knowledge_based_answer(self, query, milvus_kb, es_kb, qa_milvus_kb=None, chat_history=None, streaming: bool = STREAMING,
                                   rerank: bool = False, user_qa_pair: bool = False, recommend_que: bool = True):
        """Answer a query over the knowledge base, yielding (response, history) pairs.

        Flow: optional multi-turn query rewrite -> optional QA-pair fast path
        -> hybrid retrieval (+ optional rerank) -> title-similarity attribution
        filtering -> prompt assembly within the token budget -> (streamed) LLM
        answer -> optional recommended follow-up questions and a [DONE] frame.
        """
        history = chat_history

        # Multi-turn conversation: rewrite the user query using the history
        # multi conversation rewrite >> query
        if history:
            query = await self.llm_query_rewrite(query, history)

        # Fast path: direct hit in the QA-pair knowledge base
        if qa_milvus_kb and user_qa_pair:
            embs = self.embeddings._get_len_safe_embeddings([query])
            qa_embs_result = qa_milvus_kb.search_emb_async(embs=embs, top_k=1)
            if qa_embs_result and qa_embs_result[0].metadata['score'] > 0.8:
                response =  qa_embs_result[0].metadata['answer']
                # NOTE(review): raises TypeError when chat_history is None here —
                # confirm callers always pass a list with user_qa_pair=True.
                history += [[query, response]]

                answer_result = AnswerResult()

                answer_result.llm_output = {"answer": response}

                answer_result.history = history

                qa_embs_result[0].metadata['kernel'] = ''
                qa_embs_result[0].metadata['retrieval_query'] = query
                qa_embs_result[0].metadata['embed_version'] = '3.5'

                # Show the stored question and its answer together in the sourced passage.
                qa_embs_result[0].page_content = qa_embs_result[0].page_content.strip('-|\n') + '\n\n答案：' + qa_embs_result[0].metadata['answer']

                # Boost the retrieval similarity score, keeping 4 decimal places
                qa_embs_result[0].metadata['score'] = round(qa_embs_result[0].metadata['score'] + 0.1, 4)
                if qa_embs_result[0].metadata['score'] > 1.0:
                    qa_embs_result[0].metadata['score'] = 0.99

                retrieval_documents = qa_embs_result
                source_documents = qa_embs_result
                
                if streaming:
                    resp = answer_result.llm_output
                    resp = "data: " + json.dumps(resp, ensure_ascii=False)
                    response = self.create_response(query, resp, retrieval_documents, source_documents, history)
                    yield response, history

                    resp = f"data: [DONE]\n\n"
                    # Build and yield the final (stream-terminating) response
                    response = self.create_response(query, resp, retrieval_documents, source_documents, history)
                    yield response, history
                else:
                    resp = answer_result.llm_output['answer']
                    response = self.create_response(query, resp, retrieval_documents, source_documents, history)
                    yield response, history
                return

        source_documents = self.get_source_documents([query], milvus_kb, es_kb)

        # Document filtering
        deduplicated_docs = self.deduplicate_documents(source_documents)
        debug_logger.info(f" set docs : {deduplicated_docs}")
        retrieval_documents = sorted(deduplicated_docs, key=lambda x: float(x.metadata['score']), reverse=True)
        if rerank and len(retrieval_documents) > 1:
            debug_logger.info(f"use rerank, rerank docs num: {len(retrieval_documents)}")
            retrieval_documents = self.rerank_documents(query, retrieval_documents)

        # lrs top 5 rerank retrieval_documents
        retrieval_documents = retrieval_documents[0:5]

        # Submit question-recommendation generation to the thread pool (Future)
        futures = self.executor.submit(self.llm_question_generation, query, retrieval_documents)

        final_retrieval_docs = []
        # Compare the query with each retrieved file's title; when the
        # similarity is below the threshold, drop that file's attribution info.
        for i in range(len(retrieval_documents)):
            doc = retrieval_documents[i]
            query_embs = self.embeddings._get_len_safe_embeddings([query])
            title_embs = self.embeddings._get_len_safe_embeddings([doc.metadata['title']])
            smi_score = self.similarity_cosine(query_embs[0], title_embs[0])

            if smi_score > 0.5:
                doc.metadata['score'] =  round(float(doc.metadata['score']) + 0.1, 4)
                # Strip the file-name extension
                doc.metadata['file_name'] = doc.metadata['file_name'].split('.')[0]

                if doc.metadata['score'] > 1.0:
                    doc.metadata['score'] = 0.99
                final_retrieval_docs.append(doc)

        # NOTE(review): the prompt context is built from retrieval_documents
        # (all top-5), while attribution uses final_retrieval_docs — confirm
        # this asymmetry is intended.
        reprocess_documents = self.reprocess_source_documents(query=query,
                                                        source_docs=retrieval_documents,
                                                        history=history,
                                                        prompt_template=PROMPT_TEMPLATE)

        prompt = self.generate_prompt(query=query,
                                    source_docs=reprocess_documents, 
                                    prompt_template=PROMPT_TEMPLATE)
        t1 = time.time()

        # Fallback payload, used when the LLM generator yields nothing.
        response = {"query": query,
                        "prompt": prompt,
                        "result": "您所提的问题，答案暂时还未入库！",
                        "retrieval_documents": [],
                        "source_documents": [],
                        "relation_questions": []
                        }
        
        for answer_result in self.llm.generatorAnswer(prompt=prompt,
                                                    history=history,
                                                    streaming=streaming):
            resp = answer_result.llm_output["answer"]
            prompt = answer_result.prompt
            history = answer_result.history

            # logging.info(f"[debug] get_knowledge_based_answer history = {history}")
            # Replace the last-turn question (possibly the rewritten one) with the query.
            history[-1][0] = query
            response = {"query": query,
                        "prompt": prompt,
                        "result": resp,
                        "retrieval_documents": final_retrieval_docs,
                        "source_documents": final_retrieval_docs,
                        "relation_questions": []
                        }
            yield response, history

        if response['result'] == '您所提的问题，答案暂时还未入库！':
            # The LLM generator produced nothing: emit the fallback once, with empty history.
            yield response, []

        t2 = time.time()
        debug_logger.info(f"LLM time: {t2 - t1}")

        # Fetch the recommended follow-up questions
        if recommend_que:
            try:
                gen_questions = futures.result()
                response['relation_questions'] = gen_questions
                delta = {'answer': ''}
                response['result'] = "data: " + json.dumps(delta, ensure_ascii=False)
                debug_logger.info(f"Generated questions: {gen_questions}")
                yield response, history
            except Exception as e:
                # NOTE(review): the extra positional arg is never rendered —
                # the message has no %s placeholder.
                debug_logger.info("Error generating questions", str(e))
                gen_questions = []

            finally:
                # Always terminate the stream with a [DONE] frame.
                response['relation_questions'] = gen_questions
                response['result'] = f"data: [DONE]\n\n"
                yield response, history
        else:
            response['relation_questions'] = []
            yield response, history
