import json
from func.gobal.data import LLmData
from func.log.default_log import DefaultLog
from func.llm.openai_api import OpenAiApi
import requests,threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import copy

class SearchCore:
    """
    Literature search pipeline.

    Uses an OpenAI-compatible LLM endpoint to extract keywords from a user
    question, then queries a remote paper-chunk retrieval service (vector /
    title search) and assembles paper abstracts.
    """
    # NOTE: class-level attributes — one logger / config object shared by all instances.
    log = DefaultLog().getLogger()
    llmData = LLmData()

    def __init__(self):
        # Connect to the OpenAI-compatible endpoint configured in LLmData.
        self.openAI = OpenAiApi(api_key=self.llmData.api_key, base_url=self.llmData.base_url)

    def get_keyword_list(self, keyword_list: list):
        """Debug helper: log the type and each element of a keyword list."""
        self.log.info(type(keyword_list))
        for keyword in keyword_list:
            self.log.info("关键字:" + keyword)

    def question_key_extract(self, question):
        '''
        Step 1: extract keywords from the question via the LLM.
        :param question: user question text
        :return: dict {"keyword_list": [...]} parsed from the model's JSON output
        '''

        prompt = f"""
                Please extract the keywords from the user's question:"{question}".
                output a keyword array in order from important to unimportant:{{"keyword_list":["keyword1","keyword2","keyword...n"]}}.
                Note that there should be no duplication between the extracted keywords.
                """
        self.log.info(prompt)
        # Near-deterministic sampling + JSON mode so the reply is parseable.
        response = self.openAI.client.chat.completions.create(
            model=self.llmData.structure_model_name,
            messages=[
                {"role": "system", "content": "Question important keyword extractor."},
                {"role": "user", "content": prompt}
            ],
            top_p=0.01,
            temperature=0.01,
            response_format={"type": "json_object"}
        )
        keyword_list_str = response.choices[0].message.content
        keyword_list = json.loads(keyword_list_str)
        return keyword_list

    def question_key_extract_v2(self, question):
        '''
        Step 1 (v2): extract keywords from the question via the LLM,
        preferring phrase-level keywords and a different extraction model.
        :param question: user question text
        :return: dict {"keyword_list": [...]} parsed from the model's JSON output
        '''
        prompt = f"""
        Please extract the keywords from the user's question:"{question}".
        Each extracted keyword should ideally be a phrase.
        output a keyword array in order from important to unimportant:{{"keyword_list":["keyword1","keyword2","keyword...n"]}}.
        Note that there should be no duplication between the extracted keywords.
        """
        self.log.info(prompt)
        response = self.openAI.client.chat.completions.create(
            model=self.llmData.extract_model_name,
            messages=[
                {"role": "system", "content": "Question important keyword extractor."},
                {"role": "user", "content": prompt}
            ],
            top_p=0.01,
            temperature=0.01,
            response_format={"type": "json_object"}
        )
        keyword_list_str = response.choices[0].message.content
        keyword_list = json.loads(keyword_list_str)
        return keyword_list

    def synonyms_extract(self, keyword):
        '''
        Step 2: ask the LLM for homophones/synonyms of a keyword.
        :param keyword: keyword to expand
        :return: dict {"keyword_list": [...]} parsed from the model's JSON output
        '''
        prompt = f"""
        Please list the homophones and synonyms of "{keyword}".
        The output format is an array:{{"keyword_list":["keyword1","keyword2","keyword...n"]}}
        """
        self.log.info(prompt)
        response = self.openAI.client.chat.completions.create(
            model=self.llmData.model_name,
            messages=[
                {"role": "system", "content": "You are a synonym converter."},
                {"role": "user", "content": prompt}
            ],
            top_p=0.01,
            temperature=0.01,
            response_format={"type": "json_object"}
        )
        keyword_list_str = response.choices[0].message.content
        keyword_list = json.loads(keyword_list_str)
        return keyword_list

    def search_konwledge_title(self, keyword):
        '''
        Step 3: retrieve paper chunks whose title contains the given text
        (exact substring match on the remote service).
        :param keyword: text the title must contain
        :return: decoded JSON payload, or {} on any request/decode failure
        '''
        try:
            # NOTE(review): verify=False disables TLS verification and the host
            # is hard-coded — acceptable only for a trusted internal service.
            response = requests.get(
                f"http://180.184.65.98:38880/atomgit/query_by_title_contain?top_k=1000&title={keyword}", verify=False,
                timeout=(20, 120))
            jsonData = response.json()
            return jsonData
        except Exception as e:
            self.log.exception(f"【search_konwledge_title】搜索异常")
            return {}

    def search_konwledge_content(self, keyword, top_k=10):
        '''
        Retrieve paper chunks for a query (similarity / vector search).
        :param keyword: query text
        :param top_k: maximum number of chunks to return
        :return: decoded JSON payload, or {} on any request/decode failure
        '''
        try:
            response = requests.get(f"http://180.184.65.98:38880/atomgit/search_papers?query={keyword}&top_k={top_k}", verify=False,
                                    timeout=(20, 120))
            jsonData = response.json()
            return jsonData
        except Exception as e:
            self.log.exception(f"【search_konwledge_content】搜索异常")
            return {}

    def search_by_title(self, keyword, top_k=3):
        '''
        Retrieve paper chunks by fuzzy title match (similarity search).
        :param keyword: title query text
        :param top_k: maximum number of results
        :return: decoded JSON payload, or {} on any request/decode failure
        '''
        try:
            response = requests.get(f"http://180.184.65.98:38880/atomgit/query_by_title_like?title={keyword}&top_k={top_k}", verify=False,
                                    timeout=(20, 120))
            jsonData = response.json()
            return jsonData
        except Exception as e:
            self.log.exception(f"【search_by_title】搜索异常")
            return {}

    def search_konwledge_by_paper_id(self, paper_id, top_k):
        '''
        Retrieve the chunks of a specific paper by its id.
        :param paper_id: paper identifier on the remote service
        :param top_k: maximum number of chunks to return
        :return: decoded JSON payload, or {} on any request/decode failure
        '''
        try:
            response = requests.get(
                f"http://180.184.65.98:38880/atomgit/query_by_paper_id?paper_id={paper_id}&top_k={top_k}", verify=False,
                timeout=(20, 120))
            jsonData = response.json()
            return jsonData
        except Exception as e:
            self.log.exception(f"【search_konwledge_by_paper_id】搜索异常")
            return {}

    def key_deduplication_remove(self, main_word, keyword_list):
        """
        Drop every keyword that is already a substring of main_word.

        Mutates keyword_list["keyword_list"] in place and returns keyword_list.
        Fix: the original removed items from the list while iterating it, which
        skips the element immediately after each removal — iterate a copy instead.
        """
        kl = keyword_list["keyword_list"]
        for keyword in list(kl):
            if keyword in main_word:
                self.log.info("删除重复关键字:" + keyword)
                kl.remove(keyword)
        return keyword_list

    def abstract_extract(self, knowledge_chuck):
        """
        Collect one abstract per paper from a dict of knowledge chunks.

        :param knowledge_chuck: dict of entities each carrying paper_id,
               chunk_id, paper_title and chunk_text
        :return: {paper_id: abstract_text}
        """
        abstract_list = {}
        for key, entity in knowledge_chuck.items():
            # Locate the paper by paper_id.
            paper_id = entity["paper_id"]
            # Skip papers whose abstract was already collected.
            if paper_id in abstract_list:
                continue
            chunk_id = entity["chunk_id"]
            paper_title = entity["paper_title"]
            self.log.info("处理摘要paper_id:" + paper_id + ",paper_title:" + paper_title)
            # Chunk 0 already holds the head of the paper; otherwise fetch it.
            if chunk_id == 0:
                chunk_text = entity["chunk_text"]
            else:
                m = self.search_konwledge_by_paper_id(paper_id, 1)
                if len(m) == 0:
                    continue
                chunk_text = m[0]["chunk_text"]
            # Cut from the "abstract" marker onward (case-insensitive).
            start_index = chunk_text.lower().find("abstract")
            abstract_text = chunk_text[start_index:]
            if len(abstract_text) > 10:
                # A plausible abstract span was found.
                abstract_list[paper_id] = abstract_text
            else:
                # Marker missing or span too short — keep the whole chunk.
                abstract_list[paper_id] = chunk_text
        return abstract_list

    def search_konwledge(self, question):
        """
        Knowledge / literature search entry point.
        :param question: user question
        :return: {chunk_id: entity} — final knowledge store, entity shape
                 {"id", "paper_title", "chunk_id", "chunk_text", ...}
        """
        self.log.info("question:" + question)
        knowledge_chuck = {}  # final knowledge store: "id" -> entity dict

        # Step 1: extract keywords; the most important one becomes main_word.
        self.log.info("=========问题关键字抽取===========")
        main_word = question
        keyword_list = self.question_key_extract(question)
        if len(keyword_list["keyword_list"]) == 0:
            # No keywords extracted — fall back to the raw question.
            keyword_list["keyword_list"] = [question]
        elif len(keyword_list["keyword_list"]) > 0:
            main_word = keyword_list["keyword_list"][0]
            keyword_list["keyword_list"].pop(0)
        self.log.info("主要关键字:" + main_word)
        self.log.info(f"关键字列表：{keyword_list}")
        keyword_list = self.key_deduplication_remove(main_word, keyword_list)
        self.log.info(f"过滤重复：{keyword_list}")

        # Step 2: vector search of paper chunks, pairing main_word with each
        # secondary keyword and deduplicating by chunk id.
        self.log.info("=========检索论文片段[向量搜索]===========")
        for keyword in keyword_list["keyword_list"]:
            last_word = main_word + " " + keyword
            chuck_list = self.search_konwledge_content(last_word)
            self.log.info("搜索关键字:" + last_word + "，搜索结果:" + str(len(chuck_list)))
            for chuck in chuck_list:
                id = str(chuck["id"])
                if id not in knowledge_chuck:
                    chuck["entity"]["id"] = id
                    knowledge_chuck[id] = chuck["entity"]
            chuck_list = None

        self.log.info("最终搜索文献: " + str(len(knowledge_chuck)))

        return knowledge_chuck

    def get_keywords(self, question):
        """
        Get question keywords [2.0].
        :param question: user question
        :return: (main_word, keyword_list) — the most important keyword and the
                 remaining (deduplicated) keywords as {"keyword_list": [...]}
        """
        self.log.info("question:" + question)

        # Step 1: extract keywords; the most important one becomes main_word.
        self.log.info("=========问题关键字抽取===========")
        main_word = question
        keyword_list = self.question_key_extract(question)
        if len(keyword_list["keyword_list"]) == 0:
            keyword_list["keyword_list"] = [question]
        elif len(keyword_list["keyword_list"]) > 0:
            main_word = keyword_list["keyword_list"][0]
            keyword_list["keyword_list"].pop(0)
        self.log.info("主要关键字:" + main_word)
        self.log.info(f"关键字列表：{keyword_list}")
        keyword_list = self.key_deduplication_remove(main_word, keyword_list)
        self.log.info(f"过滤重复：{keyword_list}")
        return main_word,keyword_list

    def abstract_extract_v2(self,topic, main_word, keyword_list, threshold=0.5, top_k=1, sec_top_k=4, gradient=-1, max_workers=100):
        """
        Fetch abstract information [2.0], in parallel.

        :param topic: topic query used verbatim for the first search
        :param main_word: primary keyword, prefixed to each secondary keyword
        :param keyword_list: secondary keywords {"keyword_list": [...]}
        :param threshold: minimum similarity (chunks below it are skipped)
        :param top_k: top-k for the topic search
        :param sec_top_k: top-k for secondary-keyword searches
        :param gradient: per-keyword decrement applied to sec_top_k
        :param max_workers: thread-pool size
        :return: list of {"paper_id", "title", "abstract"} dicts
        """
        lock = threading.Lock()
        knowledge_chuck = {}  # "paper_id" -> {"paper_id", "title", "abstract"}

        keyword_list_temp = copy.deepcopy(keyword_list)
        keyword_list_temp["keyword_list"].insert(0, topic)  # topic is searched first

        self.log.info("=========摘要获取：检索论文片段[向量搜索]===========")
        def process_content(main_word , keyword, num, sec_top_k):
            # Worker: vector-search one keyword and merge hits into knowledge_chuck.
            if num == 0:
                # First entry is the topic itself, searched as-is.
                last_word = keyword
                self.log.info(f"[入口][{num}]检索论文片段- 搜索关键字:" + last_word)
                chuck_list = self.search_konwledge_content(last_word, top_k)
            else:
                # Step-down: later (less important) keywords fetch fewer chunks.
                sec_top_k = sec_top_k + (gradient * (num-1))
                if sec_top_k <= 0:
                    sec_top_k = 1

                last_word = main_word + " " + keyword
                self.log.info(f"[入口][{num}]检索论文片段- 搜索关键字:" + last_word)
                chuck_list = self.search_konwledge_content(last_word, sec_top_k)

            self.log.info(f"[完成][{num}]检索论文片段- 搜索关键字:" + last_word + "，搜索结果:" + str(len(chuck_list)))
            for chuck in chuck_list:
                if chuck['distance'] < threshold:
                    continue
                paper_id = str(chuck["entity"]["paper_id"])
                chunk_id = chuck["entity"]["chunk_id"]

                chunk_text = chuck["entity"]["chunk_text"]
                # NOTE(review): the membership test runs outside the lock, so two
                # workers may both insert the same paper_id; last write wins.
                if paper_id not in knowledge_chuck:
                    if chunk_id == 0:
                        # Chunk 0 holds the paper head — cut the abstract now.
                        abstract = self.cut_abstract(chunk_text)
                        with lock:
                            knowledge_chuck[paper_id] = {"paper_id": paper_id, "title": chuck["entity"]["paper_title"], "abstract": abstract}
                    else:
                        # Abstract unknown yet — backfilled later.
                        with lock:
                            knowledge_chuck[paper_id] = {"paper_id": paper_id, "title": chuck["entity"]["paper_title"], "abstract": ""}
                    self.log.info(f"[{num}]摘要新增：{paper_id}")
                elif chunk_id == 0:
                    abstract = self.cut_abstract(chunk_text)
                    with lock:
                        knowledge_chuck[paper_id].update({"abstract": abstract})
                    self.log.info(f"[{num}]摘要更新：{paper_id}")

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures_content = [executor.submit(process_content, main_word, keyword, i, sec_top_k) for i, keyword in
                                enumerate(keyword_list_temp["keyword_list"])]
            # Fix: wait via as_completed and call .result() so the progress bar
            # tracks real completion and worker exceptions are logged instead of
            # silently discarded (iterating the futures list did neither).
            for future in tqdm(as_completed(futures_content), total=len(futures_content), desc="Processing content"):
                try:
                    future.result()
                except Exception:
                    self.log.exception("process_content worker failed")

        self.log.info("=========补充缺失的摘要===========")
        title_with_abstract = []  # shape: [{"title": ..., "abstract": ...}]

        def process_title_with_abstract(key, entity):
            # Worker: backfill an empty abstract by fetching the paper's first chunk.
            self.log.info("[入口]摘要二次确认更新:" + key)
            abstract = entity["abstract"]
            paper_id = entity["paper_id"]
            if abstract == "":
                m = self.search_konwledge_by_paper_id(paper_id, 1)
                if len(m) == 0:
                    # Paper not retrievable — drop it from the output.
                    return
                chunk_text = m[0]["chunk_text"]
                abstract = self.cut_abstract(chunk_text)
                with lock:
                    knowledge_chuck[paper_id].update({"abstract": abstract})
                self.log.info(f"[完成]摘要二次确认更新：{paper_id}")
            self.log.info(f"最终摘要：{paper_id}")
            title_with_abstract.append(knowledge_chuck[paper_id])

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures_title_with_abstract = [executor.submit(process_title_with_abstract, key, entity) for key, entity in
                                knowledge_chuck.items()]
            # Same fix as above: actually wait and surface worker exceptions.
            for future in tqdm(as_completed(futures_title_with_abstract), total=len(futures_title_with_abstract), desc="Processing title_with_abstract"):
                try:
                    future.result()
                except Exception:
                    self.log.exception("process_title_with_abstract worker failed")

        return title_with_abstract

    def cut_abstract(self, chunk_text, num = 10):
        """
        Cut the abstract out of a chunk [2.0].

        Returns the text from the (case-insensitive) "abstract" marker onward;
        if the marker is missing or the resulting span is no longer than num
        characters, returns the whole chunk instead.
        :param chunk_text: raw chunk text (usually chunk 0 of a paper)
        :param num: minimum useful abstract length
        :return: abstract text or the full chunk
        """
        start_index = chunk_text.lower().find("abstract")
        if start_index == -1:
            # Marker not found. (The original relied on find() == -1 slicing a
            # single char and tripping the length fallback — same result, explicit.)
            return chunk_text
        abstract_text = chunk_text[start_index:]
        if len(abstract_text) <= num:
            # Span too short to be a real abstract — keep the whole chunk.
            abstract_text = chunk_text

        return abstract_text