# -*- coding:utf-8 -*-

# @Time  : 2024/3/8 1:42 PM
# @Author: chenyong
# @Email : chenyong@lingxi.ai
# @File  : text_embedding.py
import os
from knowledge_base.local_embedding import LocalEmbeddings
from common.log import logger
import traceback
import faiss
import numpy as np
import pickle
import time
import asyncio

curPath = os.path.abspath(os.path.dirname(__file__))

class KBQuery():
    """Insurance knowledge-base retrieval backed by a FAISS flat-L2 index.

    On construction this builds (if the persisted artifacts are missing) and
    then loads:
      * ``doc.pkl``     — a list of ``{"filename", "content"}`` paragraph docs
      * ``index.faiss`` — the matching FAISS ``IndexFlatL2`` of embeddings
    Row *i* of the index corresponds to ``self.doc_data[i]``.
    """

    def __init__(self):
        self.embeddings = LocalEmbeddings()

        self.knowledge_path = curPath + "/raw_data/insurance_knowledge"
        self.index_knowledge_path = curPath + "/embedding_base/insurance_knowledge_new/"
        # exist_ok avoids a race between the existence check and makedirs.
        os.makedirs(self.index_knowledge_path, exist_ok=True)
        self.doc_data_path = os.path.join(self.index_knowledge_path, "doc.pkl")
        self.doc_index_path = os.path.join(self.index_knowledge_path, "index.faiss")

        # Build the index only when either artifact is missing.
        if not os.path.exists(self.doc_data_path) or not os.path.exists(self.doc_index_path):
            self.save_product_knowledge_from_txt(self.knowledge_path)

        self.load_insurance_knowledge()


    def save_product_knowledge_from_txt(self, knowledge_path):
        """Build and persist the FAISS index and doc list from raw txt files.

        ``knowledge_path`` is expected to contain one sub-directory per
        product (plus a generic "保险知识" directory); each sub-directory
        holds txt files whose blank-line-separated paragraphs become the
        retrieval documents. Errors are logged and swallowed (best-effort);
        ``load_insurance_knowledge`` will fail loudly if nothing was written.
        """
        # Sub-directories only: skip hidden entries and stray txt files.
        doc_dirs = [docname for docname in os.listdir(knowledge_path)
                    if not docname.startswith('.') and not docname.endswith('txt')]
        product_docs = []
        try:
            for doc in doc_dirs:
                docpath = os.path.join(knowledge_path, doc)
                for file in os.listdir(docpath):
                    filepath = os.path.join(docpath, file)
                    # Only ingest txt files. Check BEFORE opening: the
                    # original opened every file first, which was wasted
                    # work and could raise on undecodable non-text files.
                    if not filepath.endswith('txt'):
                        continue
                    filename = file.replace('.txt', '')
                    with open(filepath, 'r', encoding='utf-8') as f:
                        contents = f.read()
                    # Blank-line-separated paragraphs are the retrieval unit.
                    for paragraph in contents.split('\n\n'):
                        paragraph = paragraph.strip()
                        if not paragraph:
                            continue
                        # Generic insurance knowledge keeps a fixed label so
                        # retrieval can tell it apart from product clauses.
                        product_docs.append({
                            "filename": "保险知识" if doc == "保险知识" else filename,
                            "content": paragraph,
                        })

            logger.info('共有{}条数据'.format(len(product_docs)))
            # The embedded text mirrors the query prefixes used at search time.
            page_contents = []
            for product_content in product_docs:
                if product_content["filename"] == "保险知识":
                    page_content = "保险知识:" + product_content["content"]
                else:
                    page_content = "保险产品:" + product_content["filename"] + "\n产品条款:" + product_content["content"]
                page_contents.append(page_content)
            # Embed every paragraph and store the vectors in a flat L2 index.
            embeddings = self.embeddings.embed_documents(page_contents)
            embeddings_matrix = np.array(embeddings).astype('float32')
            index = faiss.IndexFlatL2(embeddings_matrix.shape[1])
            index.add(embeddings_matrix)
            # Persist documents and index side by side; row order must match.
            with open(self.doc_data_path, 'wb') as f:
                pickle.dump(product_docs, f)
            faiss.write_index(index, self.doc_index_path)

        except Exception:
            # Best-effort build: log the full traceback and continue.
            logger.error(traceback.format_exc())


    def load_insurance_knowledge(self):
        """Load the persisted FAISS index and document list into memory."""
        self.doc_index = faiss.read_index(self.doc_index_path)
        with open(self.doc_data_path, 'rb') as f:
            self.doc_data = pickle.load(f)


    async def search_insurance_knowledge(self,
                                   query,
                                   product_name,
                                   products_introduction,
                                   top_k=5,
                                   product_limit_score=1.2,
                                   knowledge_limit_score=0.7):
        """Retrieve product clauses, general knowledge, and the product intro.

        :param query: user question text.
        :param product_name: product to restrict clause retrieval to; falsy
            skips clause retrieval entirely.
        :param products_introduction: mapping of product name -> introduction
            text (NOTE(review): the __main__ demo passes "" — a string simply
            yields no membership hit; confirm callers pass a dict).
        :param top_k: max number of clause results returned.
        :param product_limit_score: max L2 distance for clause hits.
        :param knowledge_limit_score: max L2 distance for general-knowledge hits.
        :return: list of (score, text) tuples — up to ``top_k`` clause hits,
            up to 2 forced general-knowledge hits (score 0.02), and the forced
            product introduction (score 0.01) when available.
        """
        # 1) Product clause retrieval (only when a product is specified).
        item_results = []
        if product_name:
            query_vector = await self.embeddings.async_embed_query("保险产品:" + product_name + "\n" + query)
            D, I = self.doc_index.search(np.array([query_vector]).astype('float32'), len(self.doc_data))
            for score, index in zip(D[0], I[0]):
                # IndexFlatL2 returns distances in ascending order, so once
                # past the threshold no later hit can qualify.
                if score >= product_limit_score:
                    break
                doc = self.doc_data[index]
                # Only clauses of the requested product; never generic docs.
                if doc["filename"] != product_name or doc["filename"] == "保险知识":
                    continue
                item_results.append((score, doc['content']))
                if len(item_results) >= top_k:
                    break

        # 2) General insurance knowledge: always attempted, at most two hits,
        #    each forced to rank high via a tiny fixed score of 0.02.
        general_results = []
        query_vector = await self.embeddings.async_embed_query("保险知识:" + query)
        D, I = self.doc_index.search(np.array([query_vector]).astype('float32'), len(self.doc_data))
        for score, index in zip(D[0], I[0]):
            if score >= knowledge_limit_score:
                break
            doc = self.doc_data[index]
            if doc["filename"] == "保险知识":
                general_results.append((0.02, doc['content']))
                if len(general_results) >= 2:
                    break

        # 3) Product introduction: forced in (score 0.01) when available.
        product_introduction_result = []
        if product_name and product_name in products_introduction:
            introduction = products_introduction[product_name]
            product_introduction_result.append((0.01, product_name + ' 产品简介\n' + introduction))

        return item_results[:top_k] + general_results + product_introduction_result


if __name__ == "__main__":
    # Smoke test: build/load the knowledge base, run one timed retrieval.
    kb = KBQuery()
    demo_product = "长相安长期医疗险（20年保证续保）-个人版"
    started = time.time()
    hits = asyncio.run(kb.search_insurance_knowledge("增值服务", demo_product, "", top_k=20))
    print(f"run_time is: {time.time() - started}")
    print(hits)
