# -*- coding:utf-8 -*-
import traceback
import requests
# @Time    : 2023/6/5 02:57
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @File    : knowledge_embedding.py
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import sys
sys.path.append('../../../')
import os
import tqdm
import copy
from langchain import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
import pandas as pd

# from common.log import logger
# from knowledge_base.transport_utils import FtpUtil
from knowledge_base.local_embedding import LocalEmbeddings

# 获取该脚本的绝对路径
curPath = os.path.abspath(os.path.dirname(__file__))
import sys
import os
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)

# ftpUtil = FtpUtil()

class KBQuery():  # parent class
    """FAQ knowledge base backed by a local FAISS vector store.

    Embeds FAQ rows loaded from csv files, persists/loads the FAISS index
    under this script's directory, and answers similarity queries with
    situation-aware filtering and score adjustment.
    """

    def __init__(self, embeddings, knowledge_type):
        """
        Args:
            embeddings: embeddings object (langchain Embeddings interface)
                used both to build and to query the index.
            knowledge_type: optional sub-directory name for a named knowledge
                base; falsy ('' / None) selects the default layout next to
                this file.
        """
        self.embeddings = embeddings

        # Both layouts keep the vector files and the content->recording-name
        # map side by side; `knowledge_type` just adds one directory level.
        if knowledge_type:
            self.folder_path = curPath + "/" + knowledge_type + "/embedding_base/"
            self.vocab_path = curPath + "/" + knowledge_type + "/voice_map.csv"
        else:
            self.folder_path = curPath + "/embedding_base/"
            self.vocab_path = curPath + "/voice_map.csv"
        if not os.path.exists(self.folder_path):
            os.makedirs(self.folder_path)

    def get_file_name_content_dict(self):
        """Return a {content: recording file name} dict read from voice_map.csv."""
        vocab = pd.read_csv(self.vocab_path)
        file_name_content_dict = dict(
            zip(vocab['content'], vocab['file_name']))
        return file_name_content_dict

    def save_from_csv(self, csv_paths):
        """
        Embed FAQ rows and persist them to the local FAISS store.

        Args:
            csv_paths: list of csv file paths and/or directories containing
                csv files; columns include 标准问 / 扩展问 / 答案 etc.
        Returns:
            None. Any error is printed (traceback) and swallowed so a bad
            file does not abort a batch rebuild.
        """
        try:
            meta_datas = list()
            for csv_path in csv_paths:
                if os.path.isdir(csv_path):
                    for file in os.listdir(csv_path):
                        if not file.endswith('.csv'):
                            continue
                        # FIX: os.path.join so directories given without a
                        # trailing slash also work (was `csv_path + file`).
                        df = pd.read_csv(os.path.join(csv_path, file))
                        faqDictList = df.to_dict('records')
                        print(f'{file}加载成功, 共有{len(faqDictList)}条数据')
                        meta_datas.extend(faqDictList)
                else:
                    df = pd.read_csv(csv_path)
                    faqDictList = df.to_dict('records')
                    print(f'{csv_path}加载成功, 共有{len(faqDictList)}条数据')
                    meta_datas.extend(faqDictList)

            print('共有{}条数据'.format(len(meta_datas)))
            docs = list()

            # Expand rows whose 用户情境 (user situation) column holds several
            # '/'-separated situations into one row per situation, so each
            # situation gets its own indexed document.
            meta_data_list = list()
            for index, meta_data in enumerate(meta_datas):
                # Drop keys whose value is NaN.
                meta_data = {k: v for k, v in meta_data.items() if not pd.isna(v)}
                meta_data["index"] = index

                if "用户情境" in meta_data and not pd.isna(meta_data["用户情境"]) and '/' in str(meta_data['用户情境']).strip():
                    for situation in str(meta_data['用户情境']).strip().split('/'):
                        data = copy.deepcopy(meta_data)
                        data["用户情境"] = situation
                        meta_data_list.append(data)
                else:
                    meta_data_list.append(meta_data)

            for index, meta_data in enumerate(meta_data_list):
                print('正在处理第{}条数据'.format(index))

                # Two row layouts: user-question rows (用户问题) and strategy
                # rows (沟通策略). For each row we index TWO documents — one
                # with the answer text appended and one with the question /
                # strategy only — to improve recall either way.
                if "用户问题" in meta_data:
                    page_content = "用户问题：" + str(meta_data['用户问题']).strip()
                    page_content_with_no_answer = page_content

                    if "用户情境" in meta_data and not pd.isna(meta_data["用户情境"]):
                        page_content += '\t用户情境：' + str(meta_data['用户情境']).strip()

                    # Fall back to 方案 when 话术 is absent; a row with neither
                    # raises KeyError, which the outer except reports.
                    if "话术" in meta_data:
                        page_content += '\t答案：' + str(meta_data['话术']).strip()
                    else:
                        page_content += '\t答案：' + str(meta_data['方案']).strip()

                    document = Document(
                        page_content=page_content,
                        metadata=meta_data)
                    docs.append(document)

                    document_with_no_answer = Document(
                        page_content=page_content_with_no_answer,
                        metadata=meta_data)
                    docs.append(document_with_no_answer)

                elif "沟通策略" in meta_data:
                    page_content = "沟通策略：" + str(meta_data['沟通策略']).strip()
                    page_content_with_no_answer = page_content

                    if "用户情境" in meta_data and not pd.isna(meta_data["用户情境"]):
                        page_content += '\t用户情境：' + str(meta_data['用户情境']).strip()

                    if "话术" in meta_data:
                        page_content += '\t话术：' + str(meta_data['话术']).strip()

                    document = Document(
                        page_content=page_content,
                        metadata=meta_data)
                    docs.append(document)

                    document_with_no_answer = Document(
                        page_content=page_content_with_no_answer,
                        metadata=meta_data)
                    docs.append(document_with_no_answer)

            db = FAISS.from_documents(docs, self.embeddings)
            db.save_local(folder_path=self.folder_path)

        except Exception:
            # Best-effort: report and swallow so batch rebuilds keep going.
            print(traceback.format_exc())

    def load_knowledge_base(self):
        """Load the persisted FAISS index from self.folder_path into memory."""
        self.local_knowledge_base = FAISS.load_local(folder_path=self.folder_path,
                         embeddings=self.embeddings)

    def search(self, query_str, top_k=4):
        """Return the raw top_k (doc, score) pairs for query_str, lazy-loading the index."""
        if not hasattr(self, 'local_knowledge_base'):
            self.load_knowledge_base()
        docs = self.local_knowledge_base.similarity_search_with_score(query_str, k=top_k)
        return docs

    def is_situation_match(self, doc_situation, user_situation):
        """Plain-text check that every comma-separated tag of doc_situation
        occurs in the user's situation values.

        NOTE(review): membership is tested against str(user_situation.values()),
        i.e. the textual repr "dict_values([...])" — a substring match, not a
        per-value comparison; tags colliding with that wrapper text (e.g.
        'value') would always match. Kept as-is to preserve behavior.
        """
        if "," in doc_situation:
            doc_situation_list = str(doc_situation).split(",")
        else:
            doc_situation_list = [doc_situation]
        for doc_situation in doc_situation_list:
            if not (doc_situation in str(user_situation.values())):
                return False
        return True

    async def search_with_score(self, query_str, situation, top_k=6, limit_score=1.2):
        """
        Situation-aware similarity search tuned to maximize recall.

        NOTE(review): declared async for caller compatibility (invoked via
        asyncio.run / await) although it currently awaits nothing.

        Args:
            query_str: query text.
            situation: dict of the current user's situation attributes; docs
                whose 用户情境 tags are not all present in those values are
                filtered out (unless that would filter everything — see the
                fallback at the end).
            top_k: maximum number of documents returned.
            limit_score: distance cutoff; FAISS returns kNN distances, so
                smaller means more similar and only scores below this pass.
        Returns:
            list of Document, at most top_k, deduplicated by metadata["index"].
        """
        if not hasattr(self, 'local_knowledge_base'):
            self.load_knowledge_base()
        # FIX: the original if/else on "沟通策略" ran the identical query in
        # both branches, so it was collapsed into one call. Over-fetch
        # (top_k * 20) so the filtering below still has enough candidates.
        doc_scores = self.local_knowledge_base.similarity_search_with_score(query_str, k=top_k * 20)
        docs = []
        index_set = set()
        filter_docs = []
        # Default metric is k nearest neighbors: smaller score = more similar.
        process_doc_scores = list()
        if doc_scores:
            for doc, score in doc_scores:
                if ("用户情境" in doc.metadata) and (doc.metadata["用户情境"]) and \
                        not pd.isna(doc.metadata["用户情境"]):
                    # Boost (shrink the distance of) docs whose situation is
                    # more specific: every tag except APP/call markers counts,
                    # and certain business keywords count extra.
                    doc_situation = doc.metadata["用户情境"]
                    doc_situation_list = str(doc_situation).split(",")
                    doc_situation_list_copy = copy.deepcopy(doc_situation_list)
                    for right in doc_situation_list_copy:
                        if "APP" in right or "call" in right:
                            doc_situation_list.remove(right)
                    length = len(doc_situation_list)
                    if "利息降" in doc_situation:
                        length = length + 1
                    if "到期" in doc_situation:
                        length = length + 1
                    if "提升" in doc_situation:
                        length = length + 1
                    if "25" in doc_situation or "35" in doc_situation or "45" in doc_situation or \
                        "55" in doc_situation or "65" in doc_situation:
                        length = length + 1

                    if score < limit_score and length:
                        score = score / (1 + (length - 1) * 0.2)
                process_doc_scores.append([doc, score])
        # Sort ascending so the best (smallest-distance) candidates come first.
        # (The original comment said "descending", which contradicted
        # reverse=False; the behavior was and remains ascending.)
        doc_scores = sorted(process_doc_scores, key=lambda x: x[1], reverse=False)

        for doc, score in doc_scores:
            if score < limit_score:
                # TODO hardcode: mutually-exclusive intent pairs — skip docs
                # whose content would contradict the queried intent (e.g. a
                # query asking whether the App exists must not return the
                # "open the App" guidance).
                if "询问有无App" in query_str:
                    if "引导打开App" in doc.page_content:
                        continue
                if "加微申请提醒" in query_str:
                    if "加微意向确认" in doc.page_content:
                        continue
                if "加微意向确认" in query_str:
                    if "加微申请提醒" in doc.page_content:
                        continue
                if "介绍借款页面" in query_str:
                    if "介绍活动" in doc.page_content:
                        continue
                if "加微意向确认" in query_str:
                    if "表明来意" in doc.page_content:
                        continue
                if "表明来意" in query_str:
                    if "介绍活动" in doc.page_content:
                        continue

                if doc.metadata["index"] not in index_set:

                    if ("用户情境" in doc.metadata) and (doc.metadata["用户情境"]) and not pd.isna(doc.metadata["用户情境"]):
                        if self.is_situation_match(doc.metadata["用户情境"], situation):
                            index_set.add(doc.metadata["index"])
                            docs.append(doc)
                            if len(docs) >= top_k:
                                break
                        else:
                            filter_docs.append(doc)
                    else:
                        index_set.add(doc.metadata["index"])
                        docs.append(doc)
                        if len(docs) >= top_k:
                            break
        # If the situation filter removed everything, ignore it and return the
        # best remaining candidates instead of nothing.
        if not docs and filter_docs:
            for doc in filter_docs:
                if doc.metadata["index"] not in index_set:
                    index_set.add(doc.metadata["index"])
                    docs.append(doc)

                    if len(docs) >= top_k:
                        break

        return docs

class OPENAIQuery(KBQuery):
    """KBQuery backed by OpenAI embeddings.

    SECURITY FIX: the API key was previously hard-coded in source; it is now
    read from the OPENAI_API_KEY environment variable. The key that was
    committed here must be considered leaked and should be rotated.
    """

    def __init__(self, knowledge_type):
        api_key = os.environ.get('OPENAI_API_KEY')
        if not api_key:
            # Fail fast with a clear message instead of an opaque auth error later.
            raise EnvironmentError('OPENAI_API_KEY environment variable is not set')
        embeddings = OpenAIEmbeddings(openai_api_key=api_key)
        KBQuery.__init__(self, embeddings, knowledge_type)

class AZUREQuery(KBQuery):  # child class
    """KBQuery backed by Azure OpenAI embeddings (ada-002 deployment).

    SECURITY FIX: the Azure API key was previously hard-coded in source; it is
    now read from the AZURE_OPENAI_API_KEY environment variable (and the
    endpoint may be overridden via AZURE_OPENAI_API_BASE). The committed key
    must be considered leaked and should be rotated.
    """

    def __init__(self, knowledge_type):
        api_key = os.environ.get('AZURE_OPENAI_API_KEY')
        if not api_key:
            # Fail fast with a clear message instead of an opaque auth error later.
            raise EnvironmentError('AZURE_OPENAI_API_KEY environment variable is not set')
        embeddings = OpenAIEmbeddings(
            openai_api_key=api_key,
            openai_api_type="azure",
            openai_api_base=os.environ.get('AZURE_OPENAI_API_BASE',
                                           "https://lingxi-openai.openai.azure.com"),
            openai_api_version="2023-03-15-preview",
            deployment='ada-002',
            # chunk_size=1: the Azure ada-002 deployment embeds one text per request.
            chunk_size=1)
        KBQuery.__init__(self, embeddings, knowledge_type)

class BGEQuery(KBQuery):  # child class
    """KBQuery variant backed by the project-local BGE embedding model.

    Needs no credentials; with the default knowledge_type ('') the base class
    places the vector store directly next to this script.
    """

    def __init__(self, knowledge_type=''):
        super().__init__(LocalEmbeddings(), knowledge_type)

if __name__ == '__main__':
    import asyncio
    # Driver script: rebuild both BGE-backed knowledge bases from their csv
    # sources, then run one sample situation-aware query.
    azureQuery = BGEQuery() # name of the knowledge base to use; vector files are loaded/saved under this identifier
    azureQueryNew = BGEQuery('new')

    # azureQuery.add_record_to_corpus(['raw_data/'])
    azureQuery.save_from_csv(['raw_data/']) # knowledge-base source directory
    azureQueryNew.save_from_csv(['new_raw_data/'])
    azureQuery.load_knowledge_base()
    # Sample query: a communication-strategy lookup with a full user-situation
    # dict; search_with_score is async, so drive it with asyncio.run.
    similarity_knowledge = asyncio.run(azureQuery.search_with_score(
        '沟通策略：介绍活动-没关注',
        {'用户姓名': '张女士', '用户年龄': '23', '初始用户权益': '已获得提额,已获得降息', '降息幅度': '利息降到最低',
         '降息剩余天数': '降息5天到期', 'call次': '1-call', '用户阶段': '成长期',
         '降息力度': '65到45', '提额力度': '提升近九万', '是否已打开APP': '未打开APP'},
        top_k=8,
    ))

    # Collect the returned documents. (Original note: once the accumulated
    # string length exceeds 1024, break out of the loop — currently disabled
    # below.)
    knowledge_json = {
        'data': list()
    }
    for index, item in enumerate(similarity_knowledge):
        print('{}--{}'.format(index,item))
        knowledge_json['data'].append(item)

        # if len("".join(background_knowledge)) > 1024:
        #     break



    # NOTE(review): the triple-quoted block below is dead scratch code (kept
    # verbatim) that cross-checked recording IDs between the online bot export
    # and the local knowledge-base csvs.
    '''
    from openpyxl import load_workbook, Workbook

    # 线上知识库录音编号
    wb = load_workbook('./2call_online_bot.xlsx')
    ws = wb[wb.sheetnames[0]]

    wav_no_2call_online_list = list()
    for i, row in enumerate(ws.values):
        if i != 0:
            wav_no_online = row[0]
            if wav_no_online not in wav_no_2call_online_list:
                wav_no_2call_online_list.append(wav_no_online)

    
    # 知识库中录音存在与线上版本中
    data_list = list()
    wav_no_1_list = list()
    file = './2call_knowledge/金融电销_简单用户问题.csv'

    df = pd.read_csv(file)
    faqDictList = df.to_dict('records')
    print(f'{file}加载成功, 共有{len(faqDictList)}条数据')
    for faq in faqDictList:
        # print(faq['录音编号'])
        if faq['录音编号'] not in wav_no_1_list:
            if faq['录音编号'] in wav_no_2call_online_list:
                data_list.append(faq)

            wav_no_1_list.append(faq['录音编号'])

    new_datas = pd.DataFrame(data_list)
    new_datas.to_csv('./2call_knowledge/2call_简单用户问题.csv', index=False)


    data_list = list()
    wav_no_2_list = list()
    file = './2call_knowledge/金融电销_沟通策略.csv'

    df = pd.read_csv(file)
    faqDictList = df.to_dict('records')
    print(f'{file}加载成功, 共有{len(faqDictList)}条数据')
    for faq in faqDictList:
        # print(faq['录音编号'])
        if faq['录音编号'] not in wav_no_2_list:
            if faq['录音编号'] in wav_no_2call_online_list:
                data_list.append(faq)

            wav_no_2_list.append(faq['录音编号'])

    new_datas = pd.DataFrame(data_list)
    new_datas.to_csv('./2call_knowledge/2call_沟通策略.csv', index=False)


    wb_w = Workbook()
    ws_w = wb_w.active
    ws_w.append([
        '新增录音编号',
    ])
    # 线上版本中不存在当前知识库的录音
    new_wav_no_list = list()
    for wav_no in tqdm.tqdm(wav_no_2call_online_list):
        if wav_no not in wav_no_1_list and wav_no not in wav_no_2_list:
            new_wav_no_list.append(wav_no)
            ws_w.append([wav_no])

    print(len(new_wav_no_list))
    wb_w.save('./新录音编号.xlsx')
    '''



