# -*- coding:utf-8 -*-

# @Time  : 2023/11/29 7:53 PM
# @Author: chenyong
# @Email : chenyong@lingxi.ai
# @File  : document_search.py

import os
from knowledge_base.local_embedding import LocalEmbeddings
from common.log import logger
import traceback
import faiss
import numpy as np
import pickle
import time

# Absolute path of the directory containing this file; used to resolve the
# raw-data and embedding-base directories relative to the module location.
curPath = os.path.abspath(os.path.dirname(__file__))

def match_product_name(product_name_sets, target_product_name):
    """Find the candidate in ``product_name_sets`` closest to
    ``target_product_name`` by character-level Jaccard distance.

    Generic insurance-category words (e.g. "医疗险") and decorative
    suffixes are stripped from both strings before comparison, so only
    the distinctive part of each product name is compared.

    Args:
        product_name_sets: iterable of known product names.
        target_product_name: (possibly noisy) name to match.

    Returns:
        The most similar candidate whose distance is below 0.9, or
        ``None`` when no candidate is similar enough.
    """
    # Category words / suffixes removed before comparison.  Order matters:
    # "医疗险" is removed first, so e.g. "百万医疗险" is already reduced to
    # "百万" by the time its own entry is reached.
    generic_terms = (
        "医疗险", "重大疾病保险", "意外险", "门急诊保险", "医疗保险",
        "高端医疗", "百万医疗险", "运动保险", "旅游保险", "定期寿险",
        "意外保险", "重疾险", "寿险", "门诊险", "（互联网）",
        "（互联网专属）", "（20年保证续保）",
    )
    separators = (' ', '-', '_', '—')

    def _char_set(sentence):
        # Normalize a name: drop generic terms and separators, then return
        # the set of remaining characters.
        for term in generic_terms:
            sentence = sentence.replace(term, '')
        sentence = sentence.strip()
        for sep in separators:
            sentence = sentence.replace(sep, '')
        return set(sentence)

    def jaccard_distance(sentence1, sentence2):
        try:
            set1 = _char_set(sentence1)
            set2 = _char_set(sentence2)
            intersection = set1 & set2
            union = set1 | set2
            # Epsilon avoids division by zero when both names normalize
            # to the empty string.
            return 1 - float(len(intersection)) / (float(len(union)) + 0.000001)
        except Exception:
            # Non-string input etc.: treat as maximally dissimilar.
            return 1.0

    min_distance = 0.9  # similarity threshold: distance >= 0.9 means "no match"
    most_similar_name = None
    for name in product_name_sets:
        distance = jaccard_distance(target_product_name, name)
        if distance < min_distance:
            min_distance = distance
            most_similar_name = name

    return most_similar_name

def get_products_name(file_path):
    """Read the product-name list file and return one name per line.

    Args:
        file_path: path to a UTF-8 text file, one product name per line.

    Returns:
        list[str]: the stripped lines, in file order.
    """
    # Explicit encoding: the names are Chinese and the platform default
    # encoding is not guaranteed to be UTF-8 (e.g. on Windows).
    with open(file_path, encoding='utf-8') as f:
        return [name.strip() for name in f]

def get_products_introduction(file_path):
    """Parse a product-introduction file into a name -> text mapping.

    The file holds blocks separated by blank lines; the first line of a
    block is the product name and the remaining lines are its
    introduction. Blocks with no body are skipped.

    Args:
        file_path: path to a UTF-8 text file.

    Returns:
        dict[str, str]: product name mapped to its introduction text.
    """
    products_introduction = {}
    # Explicit encoding: content is Chinese; don't rely on the platform
    # default encoding.
    with open(file_path, 'r', encoding='utf-8') as f:
        contents = f.read().strip().split('\n\n')
    for block in contents:
        lines = block.strip().split('\n')
        if len(lines) > 1:
            # First line is the product name, the rest form the body.
            products_introduction[lines[0]] = '\n'.join(lines[1:])
    return products_introduction


class DocSearch:
    """FAISS-backed semantic search over insurance knowledge documents.

    On construction, the paragraph store (``doc.pkl``) and the vector
    index (``index.faiss``) are built from the raw knowledge files if
    either is missing, then both are loaded into memory.
    """

    def __init__(self):
        self.embeddings = LocalEmbeddings()

        self.knowledge_path = os.path.join(curPath, "raw_data/insurance_knowledge")
        self.index_knowledge_path = os.path.join(curPath, "embedding_base/insurance_knowledge")
        os.makedirs(self.index_knowledge_path, exist_ok=True)
        self.doc_data_path = os.path.join(self.index_knowledge_path, "doc.pkl")
        self.doc_index_path = os.path.join(self.index_knowledge_path, "index.faiss")

        # Build the persisted artifacts on first run (or after deletion).
        if not os.path.exists(self.doc_data_path) or not os.path.exists(self.doc_index_path):
            self.save_insurance_knowledge()

        self.load_insurance_knowledge()
        self.products_name = get_products_name(
            os.path.join(curPath, "raw_data/insurance_knowledge/products_name.txt"))

    def save_insurance_knowledge(self):
        """Embed every paragraph of every knowledge file and persist the
        paragraph store (pickle) plus a FAISS L2 index of the embeddings.

        Hidden entries and loose ``.txt`` files at the top level are
        skipped; each remaining directory is scanned for text files whose
        paragraphs (blank-line separated) are embedded one by one.
        """
        docpaths = [docname for docname in os.listdir(self.knowledge_path)
                    if not docname.startswith('.') and not docname.endswith('txt')]
        product_docs = []
        embeddings = []
        try:
            for doc in docpaths:
                docpath = os.path.join(self.knowledge_path, doc)
                for file in os.listdir(docpath):
                    filename = file.replace('.txt', '')
                    logger.info('embedding file: {}'.format(filename))
                    filepath = os.path.join(docpath, file)
                    with open(filepath, 'r', encoding='utf-8') as f:
                        paragraphs = f.read().split('\n\n')
                    for paragraph in paragraphs:
                        paragraph = paragraph.strip()
                        if not paragraph:
                            continue
                        # Paragraphs under the generic "保险知识" folder are
                        # tagged with the folder name; product folders are
                        # tagged with the individual file name.
                        tag = "保险知识" if doc == "保险知识" else filename
                        product_docs.append({
                            "filename": tag,
                            "content": paragraph
                        })
                        # 将段落文本转换为向量并存储
                        embeddings.append(self.embeddings.embed_query(paragraph))

            if not product_docs:
                # Guard: np.array([]) has no second dimension, which would
                # crash IndexFlatL2 construction below.
                logger.error('no knowledge paragraphs found under {}'.format(self.knowledge_path))
                return

            logger.info('共有{}条数据'.format(len(product_docs)))
            embeddings_matrix = np.array(embeddings).astype('float32')
            index = faiss.IndexFlatL2(embeddings_matrix.shape[1])
            index.add(embeddings_matrix)

            with open(self.doc_data_path, 'wb') as f:
                pickle.dump(product_docs, f)
            faiss.write_index(index, self.doc_index_path)

        except Exception:
            logger.error(traceback.format_exc())

    def load_insurance_knowledge(self):
        """Load the FAISS index and the pickled paragraph store from disk."""
        self.doc_index = faiss.read_index(self.doc_index_path)
        with open(self.doc_data_path, 'rb') as f:
            self.doc_data = pickle.load(f)

    def search_knowledge_in_file(self, query, product_name_input, top_k=5, limit_score=1.0):
        """Return up to ``top_k`` knowledge snippets relevant to ``query``
        for the product fuzzy-matched from ``product_name_input``.

        Args:
            query: user question; embedded and searched against the index.
            product_name_input: raw product name, matched against the
                known product list via ``match_product_name``.
            top_k: maximum number of snippets to return.
            limit_score: maximum L2 distance accepted as a hit.

        Returns:
            list[tuple[str, float]]: (snippet text, distance) pairs in
            ascending distance order.
        """
        product_name = match_product_name(self.products_name, product_name_input)
        query_vector = self.embeddings.embed_query(query)
        # Search the entire index: hits belonging to other products are
        # filtered out below, so a small FAISS top-k would not be enough.
        # D holds L2 distances, I the matching doc indices; FAISS returns
        # both sorted by ascending distance.
        D, I = self.doc_index.search(np.array([query_vector]).astype('float32'),
                                     len(self.doc_data))
        results = []
        for distance, index in zip(D[0], I[0]):
            doc = self.doc_data[index]
            if doc["filename"] != product_name:
                continue
            if distance < limit_score:
                content = '\n'.join([product_name, doc['content']])
                results.append((content, distance))
                if len(results) == top_k:
                    # Distances arrive in ascending order, so the first
                    # top_k accepted hits are already the best ones.
                    break

        return results


if __name__ == "__main__":
    # Quick manual check of the fuzzy product-name matcher.
    #
    # Full end-to-end search example (requires embeddings + index):
    #   ds = DocSearch()
    #   t0 = time.time()
    #   hits = ds.search_knowledge_in_file(
    #       "一般医疗怎么报销？", "长相安长期医疗险（20年保证续保） 个人版")
    #   print(f"run_time is: {time.time() - t0}")
    #   print(hits)

    known_names = get_products_name(
        os.path.join(curPath, "raw_data/insurance_knowledge/products_name.txt"))
    query_name = "百万医疗（保险产品类型）->长相安长期医疗险（20年保证续保）"
    print(match_product_name(known_names, query_name))

