import itertools

from langchain import FAISS
from langchain.schema import Document
from langchain.embeddings import OpenAIEmbeddings

import traceback
import os

pro_path = os.path.dirname(os.path.realpath(__file__))  # absolute directory of this file

from bot.enterprise_wechat_gpt.agent.extract_user_question import *
from bot.enterprise_wechat_gpt.agent.extract_user_situation import *

# SECURITY(review): API base URL and API keys are hardcoded in source control.
# These keys should be rotated and loaded from environment variables or a
# secrets manager instead of being committed here.
openai.api_base = "https://api.xiaojuan.tech/v1"
keys = [
    'sk-5cQ2llvb3JWjpI4vHHozIOzzPGJWRFB7rBZOfhNgkDAZkjzd',
    'sk-SBiHXxMmpUKOsJ0t4LqK9jduviMWX7b31M0h8K31uu7aECIs'
]
# Shuffle once at import, then round-robin the keys across OPENAIQuery instances.
random.shuffle(keys)
keys_cycle = itertools.cycle(keys)


class KBQuery:  # parent class: FAISS-backed FAQ knowledge-base retrieval
    def __init__(self, folder_path, embeddings):
        """
        folder_path: directory where the FAISS index is persisted.
        embeddings: embedding model used to vectorize FAQ entries and queries.
        """
        self.embeddings = embeddings
        self.folder_path = folder_path

    def save_from_csv(self, csv_paths):
        """
        Build the FAISS index from FAQ CSV files and persist it to folder_path.

        csv_paths: iterable of CSV paths; each file contains at least the
                   columns 问题 (question) and 答案 (answer).
        Returns: None. Errors are logged and swallowed (best-effort), so
                 callers never see an exception from this method.
        """
        try:
            faqs = []  # one dict per CSV row, collected across all files
            for csv_path in csv_paths:
                df = pd.read_csv(csv_path)
                records = df.to_dict('records')
                logging.info(f'{csv_path} 加载成功, 共有{len(records)}条数据')
                faqs.extend(records)

            logging.info('共有{}条数据'.format(len(faqs)))
            # page_content embeds both question and answer so similarity search
            # can match either; metadata keeps the raw fields for later lookup.
            docs = [Document(page_content=f"问题:{faq.get('问题')}\n答案:{faq.get('答案')}\n",
                             metadata={'问题': faq.get('问题'), '答案': faq.get('答案')}) for faq in faqs]
            db = FAISS.from_documents(docs, self.embeddings)
            logging.info(f'生成FAISS知识库索引ing: {db}')  # was print(); use logging like the rest of the file
            db.save_local(folder_path=self.folder_path)
        except Exception:
            # Best-effort by design: index rebuild failures must not crash callers.
            logging.error(traceback.format_exc())

    def load_knowledge_base(self):
        """Load the persisted FAISS index from folder_path into memory."""
        self.local_knowledge_base = FAISS.load_local(folder_path=self.folder_path, embeddings=self.embeddings)

    def _ensure_loaded(self):
        # Lazy-load guard shared by both search entry points (was duplicated).
        if not hasattr(self, 'local_knowledge_base'):
            self.load_knowledge_base()

    def search(self, query_str, top_k=4):
        """Return the raw (Document, distance) pairs for the top_k nearest entries."""
        self._ensure_loaded()
        return self.local_knowledge_base.similarity_search_with_score(query_str, k=top_k)

    def search_with_score(self, query_str, top_k=3, limit_score=0.5):
        """
        Search and keep only hits whose distance beats limit_score.

        FAISS returns k-nearest-neighbor distances: smaller = more similar.
        Each accepted hit becomes {'answer', 'match_q', 'score'}; rejected
        positions stay as {}, so the result length always equals top_k.
        """
        self._ensure_loaded()
        doc_scores = self.local_knowledge_base.similarity_search_with_score(query_str, k=top_k)
        docs = []
        for doc, score in doc_scores:
            if score < limit_score:
                # page_content is "问题:...\n答案:...\n"; index [-2] is the answer line.
                docs.append({'answer': doc.page_content.split('\n')[-2].replace('答案:', ''),
                             'match_q': doc.metadata.get('问题', ''),
                             'score': str(score)})
            else:
                docs.append({})
        return docs


class AZUREQuery(KBQuery):  # knowledge-base query backed by Azure OpenAI embeddings
    def __init__(self, folder_path):
        """folder_path: directory of the persisted FAISS index."""
        # SECURITY(review): hardcoded Azure API key/endpoint checked into source —
        # rotate the key and load it from environment variables or a secrets manager.
        embeddings = OpenAIEmbeddings(
            openai_api_key="45a5ee249f364e208dd950f87ab5aba7",
            openai_api_type="azure",
            openai_api_base="https://lingxi-openai.openai.azure.com",
            openai_api_version="2023-03-15-preview",
            deployment='ada-002',
            chunk_size=1)
        super().__init__(folder_path, embeddings)  # idiomatic super() instead of KBQuery.__init__


class OPENAIQuery(KBQuery):  # knowledge-base query backed by OpenAI (proxy) embeddings
    def __init__(self, folder_path):
        """folder_path: directory of the persisted FAISS index."""
        # Round-robin over the module-level shuffled key pool (keys_cycle).
        key = next(keys_cycle)
        embeddings = OpenAIEmbeddings(openai_api_key=key)
        super().__init__(folder_path, embeddings)  # idiomatic super() instead of KBQuery.__init__


###########################################################################################################################################################
def get_Query():
    """Factory for the shared knowledge-base query object (Azure backend)."""
    index_dir = pro_path + "/knowledge_embedding"
    # Alternative backend kept for reference: OPENAIQuery(index_dir)
    return AZUREQuery(index_dir)


# Shared module-level query instance — NOTE: created at import time (side effect).
openaiQuery = get_Query()


def gen_knowledge():
    """Rebuild the FAISS knowledge base from the raw FAQ CSV files."""
    csv_files = [
        'raw_data/知识库：问题和答案.csv',
        'raw_data/知识库：复杂问题和方案.csv',
        'raw_data/知识库：企业主贷借钱.csv',
        # 'raw_data/知识库：白条和分分卡.csv'
    ]
    openaiQuery.save_from_csv(csv_files)


def search_knowledge(user_question, user_situation='', user_say=''):
    """
    Retrieve knowledge-base entries matching the user's question.

    user_question: question extracted from the conversation (required).
    user_situation: optional situation/context appended to the search query.
    user_say: optional raw user utterance, appended after a '|' separator.
    Returns: [] when there is no question; otherwise a list of dicts with keys
             用户问题/答案/距离 (empty strings for hits beyond the distance cap).
    """
    # No extracted question -> nothing to search for.
    if not user_question:
        return []
    start_time = time.time()
    res_knowledge = [{}]  # placeholder so the mapping below always has one entry
    query = get_query(user_question, user_situation, user_say)
    if query:
        res_knowledge = openaiQuery.search_with_score(query)

    user_id = local_data.user_id
    res_multi_knowledge = [{'用户问题': s.get('match_q', ''), '答案': s.get('answer', ''), '距离': s.get('score', '')} for s in res_knowledge]
    logging.info(f"userId:[{user_id}] query:{query} \n 查询知识库耗时：{time.time() - start_time}s \n知识库返回: {json.dumps(res_multi_knowledge, ensure_ascii=False, indent=2)}")
    return res_multi_knowledge


def get_query(uq, us, user_say):
    """
    Assemble the vector-store search query string.

    uq and us are concatenated directly when non-empty; user_say, when
    non-empty, is appended after a '|' separator. Returns '' if all are empty.
    """
    query = ''.join(part for part in (uq, us) if part)
    if user_say:
        query = f"{query}|{user_say}"
    return query


if __name__ == "__main__":
    # Smoke test: rebuild the index, then run one query end-to-end.
    gen_knowledge()     # update the knowledge base index
    user = '额度不够了'
    # Single-turn conversation history used as input for the extractors.
    raw_conversation_history = [
        {USER: user}
    ]
    deal_conversation_history = merge_role_conversation(raw_conversation_history)

    # Run question/situation extraction concurrently, then query the KB.
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use since
    # Python 3.10 — consider asyncio.run(); confirm the target Python version.
    tasks_1 = [get_user_question(deal_conversation_history), get_user_situation(deal_conversation_history)]
    loops = asyncio.get_event_loop()
    user_question, user_situation = loops.run_until_complete(asyncio.gather(*tasks_1))
    res_multi_knowledge = search_knowledge(user_question, user_situation, user)
