# -*- coding:utf-8 -*-
import traceback
import requests
# @Time    : 2023/6/5 02:57
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @File    : knowledge_embedding.py
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

import openai
import os
from langchain import FAISS, OpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
import pandas as pd

import settings
from common.log import logger
from common import constants
from knowledge_base.transport_utils import FtpUtil
from knowledge_base.local_embedding import LocalEmbeddings

# Absolute path of the directory containing this script.
curPath = os.path.abspath(os.path.dirname(__file__))

# FTP transfer helper; in this file it is only referenced by the
# commented-out upload example inside the __main__ block below.
ftpUtil = FtpUtil()
class KBQuery():  # parent class for embedding-backed knowledge-base queries
    """Build, persist and query a FAISS vector store of FAQ documents."""

    # CJK punctuation stripped from the tail of a query before searching,
    # so a trailing mark does not skew the query embedding.
    _TRAILING_PUNCTUATION = ('。', '？', '！', '，', '；', '：', '、', '（', '）',
                             '【', '】', '《', '》', '“', '”', '‘', '’')

    def __init__(self, embeddings):
        """
        Args:
            embeddings: LangChain-compatible embeddings object, used both to
                index documents and to embed queries.
        """
        self.embeddings = embeddings

        # Directory holding the serialized FAISS index (index.faiss / index.pkl).
        self.folder_path = curPath + "/embedding_base/"
        if not os.path.exists(self.folder_path):
            os.makedirs(self.folder_path)

    def save_from_csv(self, csv_paths):
        """
        Build the FAISS vector store from FAQ CSVs and persist it locally.

        Args:
            csv_paths: list of CSV file paths and/or directories containing
                CSV files. Each row must provide one of the column pairs:
                主流程/话术, 用户问题/回复话术, or 知识点/知识内容.
        Returns:
            None. Failures are logged (best-effort), never raised.
        """
        try:
            faqs = []
            for csv_path in csv_paths:
                if os.path.isdir(csv_path):
                    for file in os.listdir(csv_path):
                        if not file.endswith('.csv'):
                            continue
                        # BUGFIX: os.path.join instead of `csv_path + file`,
                        # which produced a bad path when csv_path had no
                        # trailing slash.
                        df = pd.read_csv(os.path.join(csv_path, file))
                        faqDictList = df.to_dict('records')
                        logger.info(f'{file}加载成功, 共有{len(faqDictList)}条数据')
                        faqs.extend(faqDictList)
                else:
                    df = pd.read_csv(csv_path)
                    faqDictList = df.to_dict('records')
                    logger.info(f'{csv_path}加载成功, 共有{len(faqDictList)}条数据')
                    faqs.extend(faqDictList)

            logger.info('共有{}条数据'.format(len(faqs)))
            docs = []
            for faq in faqs:
                # Choose the page-content template by which columns the row has.
                if "主流程" in faq:
                    page_content = "主流程:" + faq['主流程'].strip() + '\n话术:' + faq['话术'].strip()
                elif "回复话术" in faq:
                    page_content = "用户问题:" + faq['用户问题'].strip() + '\n回复话术:' + faq['回复话术'].strip()
                else:
                    page_content = "用户问题:" + faq['知识点'].strip() + '\n知识内容:' + faq['知识内容'].strip()
                # The full CSV row rides along as document metadata.
                docs.append(Document(page_content=page_content, metadata=faq))
            db = FAISS.from_documents(docs, self.embeddings)
            db.save_local(folder_path=self.folder_path)

        except Exception:
            # Deliberately best-effort: log the full traceback instead of raising.
            logger.error(traceback.format_exc())

    def load_knowledge_base(self):
        """Load the persisted FAISS index from self.folder_path into memory."""
        self.local_knowledge_base = FAISS.load_local(
            folder_path=self.folder_path,
            embeddings=self.embeddings,
            allow_dangerous_deserialization=True)

    @staticmethod
    def _strip_trailing_punctuation(query_str):
        """Return query_str with one trailing CJK punctuation mark removed.

        BUGFIX: safe on the empty string — the original indexing with
        query_str[-1] raised IndexError for "".
        """
        if query_str and query_str.endswith(KBQuery._TRAILING_PUNCTUATION):
            return query_str[:-1]
        return query_str

    def search(self, query_str, top_k=4):
        """Return top_k (Document, score) pairs; lazily loads the index."""
        if not hasattr(self, 'local_knowledge_base'):
            self.load_knowledge_base()
        docs = self.local_knowledge_base.similarity_search_with_score(query_str, k=top_k)
        return docs

    def search_with_score(self, query_str, top_k=4, limit_score=0.35):
        """Return Documents whose distance score is below limit_score.

        FAISS returns k-nearest-neighbour distances here, so SMALLER scores
        mean MORE similar; limit_score is an upper bound on distance.
        """
        if not hasattr(self, 'local_knowledge_base'):
            self.load_knowledge_base()
        # Strip one trailing punctuation mark before embedding the query.
        query_str = self._strip_trailing_punctuation(query_str)
        doc_scores = self.local_knowledge_base.similarity_search_with_score(query_str, k=top_k)
        return [doc for doc, score in doc_scores if score < limit_score]

class OPENAIQuery(KBQuery):
    """KBQuery backed by OpenAI's hosted embeddings API."""

    def __init__(self):
        # SECURITY: an API key used to be hard-coded here and is therefore
        # leaked in version control — it must be revoked/rotated. Read the
        # key from the environment instead of committing it to source.
        embeddings = OpenAIEmbeddings(openai_api_key=os.environ.get('OPENAI_API_KEY'))
        KBQuery.__init__(self, embeddings)

class AZUREQuery(KBQuery):  # child class
    """KBQuery backed by an Azure OpenAI embeddings deployment (ada-002)."""

    def __init__(self):
        # SECURITY: the Azure key used to be hard-coded here and is leaked in
        # version control — it must be rotated. Read it from the environment.
        embeddings = OpenAIEmbeddings(
            openai_api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
            openai_api_type="azure",
            openai_api_base="https://lingxi-openai.openai.azure.com",
            openai_api_version="2023-03-15-preview",
            deployment='ada-002',
            # chunk_size=1: embed one text per API request.
            chunk_size=1)
        KBQuery.__init__(self, embeddings)

class BGEQuery(KBQuery):  # child class
    """KBQuery backed by the locally hosted embedding model (LocalEmbeddings)."""

    def __init__(self):
        super().__init__(LocalEmbeddings())

if __name__ == '__main__':
    # Build the vector store from the raw CSV directory with the local BGE
    # embeddings, then run a probe query against it.
    kb_query = BGEQuery()
    kb_query.save_from_csv(['raw_data/'])
    result1 = kb_query.search_with_score('用户问题:叫什么', top_k=6, limit_score=1.1)
    print('result1', result1)

    # --- Manual utilities kept for reference (disabled) ------------------
    # Push the serialized index files to the FTP server:
    # ftpUtil.pushFile(['embedding_base/insurance_planner_gpt/index.pkl',
    #                   'embedding_base/insurance_planner_gpt/index.faiss'])

    # result1 = kb_query.search_with_score('医疗险和重疾险有什么区别？我都要买吗？')
    # print('result1', result1)
    # kb_query.save_from_csv(['raw_data/insurance_planner_gpt/规划师_用户疑义.csv',
    #                         'raw_data/insurance_planner_gpt/规划师_用户问题.csv', 'raw_data/insurance_planner_gpt/规划师_解决方案.csv', 'raw_data/insurance_planner_gpt/规划师_专业知识.csv'])

    # openaiQuery = OPENAIQuery("embedding_base/insurance_planner_gpt")
    # openaiQuery.save_from_csv(['raw_data/insurance_planner_gpt/规划师_用户疑义.csv',
    #                            'raw_data/insurance_planner_gpt/规划师_用户问题.csv'])
    # result2 = openaiQuery.search_with_score('意外险怎么选？')
    # result3 = openaiQuery.search_with_score('在什么地方养老比较合适')
    # print('result2', result2)
    # print("result3", result3)
