# -*- coding:utf-8 -*-
# @Time    : 2023/6/5 02:57
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @File    : knowledge_embedding.py
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import traceback
import requests
import re
import torch
import openai
import os
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
import pandas as pd

import settings
from common.log import logger
from common import constants
from knowledge_base.transport_utils import FtpUtil

from knowledge_base.local_embedding import LocalEmbeddings
from util_tool.draw_picture import plot_casual_graph

# Absolute path of the directory containing this script; used as the root
# for all embedding-base folders below.
curPath = os.path.abspath(os.path.dirname(__file__))

# Shared FTP client used to push freshly built index files to the file server.
ftpUtil = FtpUtil()


class KBQuery():  # parent class
    """Knowledge-base build/query helper backed by FAISS vector stores.

    The ``save_*`` methods build FAISS indexes from raw CSV/TXT data and
    persist them to disk; the ``search_*`` methods lazily load those indexes
    and run similarity searches. Subclasses only supply the embedding model.
    """

    # NOTE: this CSV is read at class-definition (import) time; a missing or
    # renamed file makes the whole module fail to import.
    current_script_path = os.path.abspath(__file__)
    csv_file_path = os.path.normpath(
        os.path.join(current_script_path, '../../bot/insurance_planner_gpt/doc/products_config.csv'))
    products_config_df = pd.read_csv(csv_file_path, encoding='utf-8')
    products_config_df = products_config_df.set_index('产品名称')
    # Mapping: product name -> {column name: value} for the remaining columns.
    products_config = products_config_df.to_dict(orient='index')

    def __init__(self, embeddings):
        """Remember the embedding model and ensure index directories exist.

        Args:
            embeddings: langchain-compatible embeddings object used both to
                build and to query the FAISS indexes.
        """
        self.embeddings = embeddings

        # On-disk locations of the individual FAISS indexes.
        self.folder_path = curPath + "/embedding_base/insurance_planner_gpt_bge"
        self.product_item_path = curPath + "/embedding_base/product_items/"
        # self.products_folder_path = curPath + "/embedding_base/products/"
        self.insurance_knowledge_path = curPath + "/embedding_base/insurance_knowledge/"
        self.solutions_path = curPath + "/embedding_base/solutions"
        self.company_knowledge_path = curPath + "/embedding_base/company_knowledge/"

        if not os.path.exists(self.folder_path):
            os.makedirs(self.folder_path)
        # if not os.path.exists(self.products_folder_path):
        #     os.makedirs(self.products_folder_path)
        if not os.path.exists(self.insurance_knowledge_path):
            os.makedirs(self.insurance_knowledge_path)

        # Fetch the main FAQ index from the data server if it is missing locally.
        if not os.path.exists(os.path.join(self.folder_path, 'index.faiss')) or \
                not os.path.exists(os.path.join(self.folder_path, 'index.pkl')):
            self.download_embeddings_files(self.folder_path, ['index.faiss', 'index.pkl'])

    def download_embeddings_files(self, local_path, files):
        """Download pre-built index files from the knowledge-data server.

        Args:
            local_path: local directory to write the files into.
            files: list of file names to fetch.
        """
        try:
            for file in files:
                # NOTE(review): os.path.join on a URL uses the OS separator —
                # this only builds a valid URL on POSIX systems.
                r = requests.get(os.path.join(constants.KNOWLEDGE_EMBEDDINGS_DATA_SERVER, file))
                with open(os.path.join(local_path, file), 'wb') as f:
                    f.write(r.content)
        except Exception as ee:
            logger.error(traceback.format_exc())
            logger.info('下载向量库文件失败, 请检查网络连接')

    def save_from_csv(self, csv_paths):
        """
        Build the FAQ vector store from CSV files and save it to disk.
        Args:
            csv_paths: list of CSV file paths or directories; rows contain a
                question column plus an answer/solution/knowledge column.
        Returns: None
        """
        try:
            # Fetch any missing CSV files from the data server first.
            for csv_path in csv_paths:
                if not os.path.exists(csv_path):
                    name_and_path = os.path.split(csv_path)
                    self.download_embeddings_files(name_and_path[0], [name_and_path[-1]])

            faqs = []
            # Each entry may be a directory of CSVs or a single CSV file.
            for csv_path in csv_paths:
                if os.path.isdir(csv_path):
                    for file in os.listdir(csv_path):
                        if not file.endswith('.csv'):
                            continue
                        # NOTE(review): plain string concatenation — csv_path
                        # must end with a path separator for this to work.
                        df = pd.read_csv(csv_path + file)
                        faqDictList = df.to_dict('records')
                        logger.info(f'{file}加载成功, 共有{len(faqDictList)}条数据')
                        faqs.extend(faqDictList)
                else:
                    df = pd.read_csv(csv_path)
                    faqDictList = df.to_dict('records')
                    logger.info(f'{csv_path}加载成功, 共有{len(faqDictList)}条数据')
                    faqs.extend(faqDictList)

            logger.info('共有{}条数据'.format(len(faqs)))
            docs = []
            # Build the embedded text from whichever answer column the row has;
            # the full row is kept as document metadata.
            for faq in faqs:
                if "答案" in faq:
                    page_content = "用户问题:" + faq['用户问题'].strip() + '\n答案:' + faq['答案'].strip()
                elif "解决方案" in faq:
                    page_content = "用户问题:" + faq['用户问题'].strip() + '\n答案:' + faq['解决方案'].strip()
                else:
                    page_content = "用户问题:" + faq['知识点'].strip() + '\n知识内容:' + faq['知识内容'].strip()
                document = Document(
                    page_content=page_content,
                    metadata=faq)
                docs.append(document)
            db = FAISS.from_documents(docs, self.embeddings)
            db.save_local(folder_path=self.folder_path)

            if settings.ONLINE:
                # Upload the rebuilt index to the file server.
                ftpUtil.pushFile(['embedding_base/insurance_planner_gpt/index.pkl',
                                  'embedding_base/insurance_planner_gpt/index.faiss'])
        except Exception as ee:
            logger.error(traceback.format_exc())

    def save_product_item_from_csv(self, csv_paths):
        """
        Build the product-attribute vector store from CSV files.
        Args:
            csv_paths: list of CSV file paths or directories; columns include
                产品名称 / 产品属性 / 属性值 (or 产品问题).
        Returns: None
        """
        try:
            for csv_path in csv_paths:
                if not os.path.exists(csv_path):
                    name_and_path = os.path.split(csv_path)
                    self.download_embeddings_files(name_and_path[0], [name_and_path[-1]])

            faqs = []
            for csv_path in csv_paths:
                if os.path.isdir(csv_path):
                    for file in os.listdir(csv_path):
                        if not file.endswith('.csv'):
                            continue
                        # NOTE(review): assumes csv_path ends with a separator.
                        df = pd.read_csv(csv_path + file)
                        faqDictList = df.to_dict('records')
                        logger.info(f'{file}加载成功, 共有{len(faqDictList)}条数据')
                        faqs.extend(faqDictList)
                else:
                    df = pd.read_csv(csv_path)
                    faqDictList = df.to_dict('records')
                    logger.info(f'{csv_path}加载成功, 共有{len(faqDictList)}条数据')
                    faqs.extend(faqDictList)

            logger.info('共有{}条数据'.format(len(faqs)))
            docs = []
            for faq in faqs:
                page_content = None
                # NOTE(review): if a row has neither '产品属性' nor '产品问题',
                # page_content stays None and Document() will raise — the error
                # is then swallowed by the except below.
                if "产品属性" in faq:
                    page_content = "保险产品:" + faq['保险产品'].strip() + '\n产品属性:' + faq['产品属性'].strip()
                elif "产品问题" in faq:
                    page_content = "产品名称:" + faq['产品名称'].strip() + '\n产品问题:' + faq['产品问题'].strip()
                document = Document(
                    page_content=page_content,
                    metadata=faq)
                docs.append(document)
            db = FAISS.from_documents(docs, self.embeddings)
            db.save_local(folder_path=self.product_item_path)

        except Exception as ee:
            logger.error(traceback.format_exc())

    def save_product_graph_from_csv(self, graph_csv_paths):
        """
        Build causal-graph documents from .txt files and save them to a
        vector store. Each non-empty line of a file becomes one document;
        metadata (insurance type, product type, ...) is inferred from the
        directory name and the file name.
        Args:
            graph_csv_paths: list of directories containing .txt graph files.
        Returns: None
        """
        try:
            causal_cots = []
            for csv_path in graph_csv_paths:
                if os.path.isdir(csv_path):
                    for file in os.listdir(csv_path):
                        if not file.endswith('.txt'):
                            continue
                        with open(csv_path + file, 'r', encoding='utf-8') as f:
                            lines = f.readlines()
                        for line in lines:
                            if len(line.strip()) == 0:
                                continue
                            product_graph = {}
                            # The directory name decides how metadata is filled.
                            if "product_type" in csv_path:
                                product_type_name = file.split('.')[0]
                                product_graph['产品类型'] = product_type_name
                                if "医疗" in product_type_name:
                                    product_graph['保险类型'] = "医疗险"

                                product_graph['产品因果图'] = line
                            elif "insurance_type" in csv_path:
                                product_graph['保险类型'] = file.split('.')[0]
                                product_graph['产品因果图'] = line
                            elif "plan" in csv_path:
                                product_graph['方案类型'] = file.split('.')[0]
                                product_graph['方案因果图'] = line
                            else:
                                # Product-level file: infer insurance type from
                                # keywords in the product (file) name.
                                product_name = file.split('.')[0]
                                if "医疗" in product_name:
                                    product_graph['保险类型'] = "医疗险"
                                    if "百万医疗" in product_name:
                                        product_graph['产品类型'] = "百万医疗险"
                                    elif "中高端医疗" in product_name:
                                        product_graph['产品类型'] = "中高端医疗"
                                elif "重疾" in product_name or "重大疾病" in product_name:
                                    product_graph['保险类型'] = "重疾险"
                                elif "意外" in product_name:
                                    product_graph['保险类型'] = "意外险"
                                elif "寿险" in product_name:
                                    product_graph['保险类型'] = "寿险"

                                product_graph['保险产品'] = file.split('.')[0]
                                product_graph['产品因果图'] = line

                            causal_cots.append(product_graph)
                        print('lines', lines)
                        logger.info(f'{file}加载成功, 共有{len(causal_cots)}条数据')

            logger.info('共有{}条数据'.format(len(causal_cots)))
            docs = []
            for cot in causal_cots:
                if "产品因果图" in cot:
                    page_content = cot['产品因果图'].strip()
                else:
                    page_content = cot['方案因果图'].strip()
                document = Document(
                    page_content=page_content,
                    metadata=cot)
                docs.append(document)
            db = FAISS.from_documents(docs, self.embeddings)
            # NOTE(review): self.products_folder_path is never defined (its
            # assignment in __init__ is commented out), so this line raises
            # AttributeError and the index is never saved — the error is only
            # logged by the except below.
            db.save_local(folder_path=self.products_folder_path)

        except Exception as ee:
            logger.error(traceback.format_exc())

    def save_product_knowledge_from_txt(self, knowledge_path):
        """Build the insurance-knowledge vector store from TXT files.

        Walks the sub-directories of ``knowledge_path`` (top-level .txt and
        hidden entries are skipped), splits each .txt file into paragraphs on
        blank lines, and embeds one document per paragraph.

        Args:
            knowledge_path: root directory containing per-topic sub-folders.
        """
        docpaths = [docname for docname in os.listdir(knowledge_path)
                    if not docname.startswith('.') and not docname.endswith('txt')]
        product_docs = []
        try:
            for doc in docpaths:
                docpath = os.path.join(knowledge_path, doc)
                for file in os.listdir(docpath):
                    filename = file.replace('.txt', '')
                    filepath = os.path.join(docpath, file)
                    with open(filepath, 'r', encoding='utf-8') as f:
                        # NOTE(review): the extension check happens after the
                        # file is already opened; non-txt files are opened and
                        # then skipped.
                        if not filepath.endswith('txt'):
                            continue
                        contents = f.read()
                        # Paragraphs are separated by blank lines.
                        paragraphs = contents.split('\n\n')
                        for paragraph in paragraphs:
                            if paragraph.strip():
                                if doc == "保险知识":
                                    product_content = {
                                        "filename": "保险知识",
                                        "content": paragraph.strip()
                                    }
                                else:
                                    product_content = {
                                        "filename": filename,
                                        "content": paragraph.strip()
                                    }
                                product_docs.append(product_content)

            logger.info('共有{}条数据'.format(len(product_docs)))
            docs = []
            for product_content in product_docs:
                if product_content["filename"] == "保险知识":
                    page_content = "保险知识:" + product_content["content"]
                else:
                    page_content = "保险产品:" + product_content["filename"] + "\n产品条款:" + product_content["content"]
                document = Document(
                    page_content=page_content,
                    metadata=product_content)
                docs.append(document)
            db = FAISS.from_documents(docs, self.embeddings)
            db.save_local(self.insurance_knowledge_path)
            logger.info(f"保存成功")

        except Exception as e:
            logger.error(traceback.format_exc())

    def save_solution_from_csv(self, csv_paths):
        """
        Build the solutions vector store from CSV files.
        Args:
            csv_paths: list of directories of CSV files; columns include
                用户问题 / 用户情境 / 解决方案. The file name (without .csv)
                becomes the solution stage.
        Returns: None
        """
        try:
            solutions = []
            for csv_path in csv_paths:
                if os.path.isdir(csv_path):
                    for file in os.listdir(csv_path):
                        if not file.endswith('.csv'):
                            continue
                        # NOTE(review): assumes csv_path ends with a separator.
                        df = pd.read_csv(csv_path + file)
                        df = df.dropna(how='all')
                        df['solution_stage'] = file.replace('.csv', '')
                        # Convert NaN cells to None.
                        df = df.where(df.notnull(), None)
                        # Normalize the user-context column: drop CR/LF/spaces
                        # and convert full-width colon/comma to ASCII.
                        df['用户情境'] = df['用户情境'].str.replace("\r", "").str.replace("\n", "").str.replace(" ",
                                                                                                        "").str.replace(
                            "：", ":").str.replace("，", ",")
                        # Strip all space characters from every string cell.
                        df = df.applymap(lambda x: x.replace(" ", "") if isinstance(x, str) else x)
                        DictList = df.to_dict('records')
                        logger.info(f'{file}加载成功, 共有{len(DictList)}条数据')
                        solutions.extend(DictList)
            logger.info('共有{}条数据'.format(len(solutions)))
            docs = []
            for solution in solutions:
                page_content = "用户问题:" + solution['用户问题'].strip()
                solution['问题解决方案'] = '【' + solution['solution_stage'] + '】：' + solution['用户问题'].strip() + '->' + \
                                     solution['解决方案'].strip()
                # For stage-3 solutions, append the product's insurance type and
                # product type (from products_config) after the product name.
                if solution['solution_stage'] == '阶段三':
                    # print(solution['问题解决方案'])
                    product_pattern = re.compile(r'->(.+?)（保险产品名称）')
                    product_match = re.search(product_pattern, solution['问题解决方案'])
                    if product_match:
                        product_name = product_match.group(1)
                        product_type = self.products_config.get(product_name, '')
                        if product_type:
                            s = solution['问题解决方案']
                            # Insert right after the "（保险产品名称）" marker.
                            insert_position = s.find("（保险产品名称）") + len("（保险产品名称）")
                            new_content = '（保险类型：' + product_type['保险类型'] + \
                                          '，保险产品类型：' + product_type['保险产品类型'] + '）'
                            updated_string = s[:insert_position] + new_content + s[insert_position:]
                            solution['问题解决方案'] = updated_string
                            # solution['问题解决方案'] = solution['问题解决方案'] + '（保险类型：' + product_type['保险类型'] + \
                            #                      '，保险产品类型：' + product_type['保险产品类型'] + '）'
                        else:
                            print(solution['问题解决方案'], '没检索到产品配置，不会追加险种类型')
                document = Document(
                    page_content=page_content,
                    metadata=solution)
                docs.append(document)
            db = FAISS.from_documents(docs, self.embeddings)
            db.save_local(folder_path=self.solutions_path)

        except Exception as ee:
            logger.error(traceback.format_exc())

    def save_company_knowledge_from_csv(self, csv_paths):
        """
        Build the company-knowledge vector store from CSV files.
        Args:
            csv_paths: list of directories of CSV files; columns include
                用户问题 / 话术 (the 用户情境 and 优先级 columns are not used yet).
        Returns: None
        """
        try:
            solutions = []
            for csv_path in csv_paths:
                if os.path.isdir(csv_path):
                    for file in os.listdir(csv_path):
                        if not file.endswith('.csv'):
                            continue
                        # NOTE(review): assumes csv_path ends with a separator.
                        df = pd.read_csv(csv_path + file)
                        df = df.dropna(how='all')
                        # Replace NaN cells with the literal string '无'.
                        df = df.where(df.notnull(), '无')
                        # Strip all space characters from every string cell.
                        df = df.applymap(lambda x: x.replace(" ", "") if isinstance(x, str) else x)
                        DictList = df.to_dict('records')
                        logger.info(f'{file}加载成功, 共有{len(DictList)}条数据')
                        solutions.extend(DictList)
            logger.info('共有{}条数据'.format(len(solutions)))
            docs = []
            for solution in solutions:
                if "用户问题" in solution:
                    page_content = "用户问题：" + str(solution['用户问题']).strip()
                else:
                    page_content = "沟通策略：" + str(solution['沟通策略']).strip()
                document = Document(
                    page_content=page_content,
                    metadata=solution)
                docs.append(document)
            db = FAISS.from_documents(docs, self.embeddings)
            db.save_local(folder_path=self.company_knowledge_path)

        except Exception as ee:
            logger.error(traceback.format_exc())

    def load_all_knowledge_base(self):
        """
        Load every FAISS index from disk into memory.
        Returns: None
        """
        self.local_knowledge_base = FAISS.load_local(folder_path=self.folder_path,
                                                     embeddings=self.embeddings)
        # self.local_causal_graph = FAISS.load_local(folder_path=self.products_folder_path,
        #                                            embeddings=self.embeddings)
        self.product_item_knowledge = FAISS.load_local(folder_path=self.product_item_path,
                                                       embeddings=self.embeddings)
        self.insurance_knowledge = FAISS.load_local(folder_path=self.insurance_knowledge_path,
                                                    embeddings=self.embeddings)
        self.solutions = FAISS.load_local(folder_path=self.solutions_path,
                                          embeddings=self.embeddings)
        self.company_knowledge = FAISS.load_local(folder_path=self.company_knowledge_path,
                                                  embeddings=self.embeddings)

    def search(self, query_str, top_k=4):
        """Return the top_k (doc, score) pairs from the FAQ index."""
        if not hasattr(self, 'local_knowledge_base'):
            self.load_all_knowledge_base()
        docs = self.local_knowledge_base.similarity_search_with_score(query_str, k=top_k)
        return docs

    def search_with_score(self, query_str, top_k=4, limit_score=0.6):
        """Return FAQ documents whose distance is below limit_score."""
        if not hasattr(self, 'local_knowledge_base'):
            self.load_all_knowledge_base()
        # (Original note said trailing punctuation should be stripped from
        # query_str — no such stripping is actually implemented.)
        doc_scores = self.local_knowledge_base.similarity_search_with_score(query_str, k=top_k)
        docs = []
        # FAISS defaults to k-nearest-neighbour distance: smaller = more similar.
        for doc, score in doc_scores:
            if score < limit_score:
                docs.append(doc)
        return docs

    def search_product_with_score(self, query_str, top_k=4, limit_score=1.0):
        """Search the causal-graph index for products below limit_score.

        NOTE(review): self.local_causal_graph is never assigned because its
        load is commented out in load_all_knowledge_base(), so this method
        currently raises AttributeError when called.
        """
        if not hasattr(self, 'local_causal_graph'):
            self.load_all_knowledge_base()
        doc_scores = self.local_causal_graph.similarity_search_with_score(query_str, k=top_k)
        docs = []
        # Smaller distance = more similar.
        for doc, score in doc_scores:
            if score < limit_score:
                docs.append(doc)
        return docs

    def search_product_item_with_score(self, query_str, top_k=4, limit_score=1.0):
        """Return product-attribute documents whose distance is below limit_score."""
        if not hasattr(self, 'product_item_knowledge'):
            self.load_all_knowledge_base()
        doc_scores = self.product_item_knowledge.similarity_search_with_score(query_str, k=top_k)
        docs = []
        # Smaller distance = more similar.
        for doc, score in doc_scores:
            if score < limit_score:
                docs.append(doc)
        return docs

    def search_insurance_knowledge(self,
                                   query,
                                   product_name,
                                   products_introduction,
                                   top_k=5,
                                   product_limit_score=1.2,
                                   knowledge_limit_score=0.7):
        """Search product clauses plus general insurance knowledge.

        Returns a list of (score, text) tuples: up to top_k clause hits for
        product_name, up to 2 general-knowledge hits (score pinned to 0.02 so
        they always rank), and the product introduction (score 0.01) when one
        exists in products_introduction.
        """
        # Search the product's clause paragraphs across the whole index.
        iterm_results = []
        if product_name:
            search_clause_docs = self.insurance_knowledge.similarity_search_with_score(
                "保险产品:" + product_name + "\n" + query,
                k=len(self.insurance_knowledge.index_to_docstore_id))
            for doc, score in search_clause_docs:
                if doc.metadata["filename"] != product_name or doc.metadata["filename"] == "保险知识":
                    continue
                else:
                    if score < product_limit_score:
                        iterm_results.append((score, doc.page_content))

        # General insurance knowledge — force-included (at most 2 hits).
        general_results = []
        search_knowledge_docs = self.insurance_knowledge.similarity_search_with_score("保险知识:" + query, k=len(
            self.insurance_knowledge.index_to_docstore_id))
        for doc, score in search_knowledge_docs:
            if doc.metadata["filename"] == "保险知识" and score < knowledge_limit_score and len(general_results) < 2:
                general_results.append((0.02, doc.page_content))

        # Product introduction — force-included when available.
        product_introduction_result = []
        if product_name and product_name in products_introduction:
            introduction = products_introduction[product_name]
            product_introduction_result.append((0.01, product_name + ' 产品简介\n' + introduction))

        return iterm_results[:top_k] + general_results + product_introduction_result

    def search_solutions(self,
                         query,
                         top_k=5,
                         limit_score=1.2,
                         max_search_num=1000,
                         if_print_score=False,
                         stage3_topk=None):
        """Search the solutions index, grouping hits by solution stage.

        Returns a dict {'阶段一': [...], '阶段二': [...], '阶段三': [...]} of
        per-stage metadata, each list capped at top_k (or stage3_topk for
        stage three, which may need more product candidates).
        """
        if not hasattr(self, 'solutions'):
            self.load_all_knowledge_base()
        search_docs = self.solutions.similarity_search_with_score(query, k=min(len(self.solutions.index_to_docstore_id),
                                                                               max_search_num))
        # k-NN distance: smaller = more similar; search_docs is already sorted
        # by ascending score.
        result = {'阶段一': [], '阶段二': [], '阶段三': []}
        for doc, score in search_docs:
            if score >= limit_score:
                continue
            solution_stage = doc.metadata['solution_stage']
            if stage3_topk and solution_stage == '阶段三':
                # Stage three gets its own top-k: it should surface more
                # product candidates to choose from.
                use_top_k = stage3_topk
            else:
                use_top_k = top_k
            if solution_stage in result and len(result[solution_stage]) < use_top_k:
                selected_metadata = {
                    '用户情境': doc.metadata['用户情境'],
                    '问题解决方案': doc.metadata['问题解决方案']
                }
                result[solution_stage].append(selected_metadata)
            if if_print_score:
                print('问题：', query, '=== 相似距离：', doc.metadata['问题解决方案'], score)
        return result

    def search_company_knowledge(self,
                         query,
                         type = '用户问题',
                         top_k=3,
                         limit_score=1,
                         max_search_num=100,
                         if_print_score=False):
        """Search the company-knowledge index filtered by entry type.

        Args:
            query: free-text query; prefixed with ``type + ':'`` before search.
            type: metadata key a hit must contain ('用户问题' or '沟通策略').
        Returns: list of stringified metadata dicts, at most top_k entries.
        """
        # NOTE(review): this checks/uses self.solutions (hasattr and the k
        # computation below) while actually searching self.company_knowledge —
        # looks like a copy-paste slip from search_solutions.
        if not hasattr(self, 'solutions'):
            self.load_all_knowledge_base()
        query = type + ':' + query
        search_docs = self.company_knowledge.similarity_search_with_score(query, k=min(len(self.solutions.index_to_docstore_id),
                                                                               max_search_num))
        # k-NN distance: smaller = more similar; already sorted ascending.
        result = []
        for doc, score in search_docs:
            if score >= limit_score or type not in doc.metadata:
                continue
            result.append(str(doc.metadata))
            if if_print_score:
                print('检索项', query, '=== 相似距离：', doc.metadata, score)
        return result[:top_k]


class OPENAIQuery(KBQuery):
    """KBQuery variant backed by OpenAI embeddings.

    The API key is read from the OPENAI_API_KEY environment variable.
    SECURITY: a live key used to be hard-coded here and committed to source
    control — it must be treated as compromised and rotated.
    """

    def __init__(self):
        embeddings = OpenAIEmbeddings(openai_api_key=os.environ.get('OPENAI_API_KEY', ''))
        KBQuery.__init__(self, embeddings)


class AZUREQuery(KBQuery):  # child class
    """KBQuery variant backed by Azure OpenAI embeddings (ada-002 deployment).

    The API key is read from the AZURE_OPENAI_API_KEY environment variable.
    SECURITY: a live key used to be hard-coded here and committed to source
    control — it must be treated as compromised and rotated.
    """

    def __init__(self):
        embeddings = OpenAIEmbeddings(
            openai_api_key=os.environ.get('AZURE_OPENAI_API_KEY', ''),
            openai_api_type="azure",
            openai_api_base="https://lingxi-openai.openai.azure.com",
            openai_api_version="2023-03-15-preview",
            deployment='ada-002',
            # chunk_size=1: embed one text per request (Azure deployment limit).
            chunk_size=1)
        KBQuery.__init__(self, embeddings)


class BGEQuery(KBQuery):  # child class
    """KBQuery variant backed by the locally hosted BGE embedding model."""

    def __init__(self):
        super().__init__(LocalEmbeddings())


if __name__ == '__main__':
    import pprint

    # Allow duplicate OpenMP runtimes (torch and faiss both bundle libomp);
    # without this the process can abort on macOS.
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

    bgeQuery = BGEQuery()
    # Rebuild the solutions and company-knowledge indexes from the raw CSVs,
    # then load every index and run two sample company-knowledge searches.
    bgeQuery.save_solution_from_csv(['raw_data/solutions/'])
    bgeQuery.save_company_knowledge_from_csv(['raw_data/company_knowledge/'])
    bgeQuery.load_all_knowledge_base()

    answer1 = bgeQuery.search_company_knowledge("医疗险", type='用户问题', top_k=3, if_print_score=True, limit_score=2)
    answer2 = bgeQuery.search_company_knowledge("赠险", type='沟通策略', top_k=3, if_print_score=True, limit_score=2)

    pprint.pprint(answer1)
    pprint.pprint(answer2)
