"""
探索中

"""

import sys
from operator import itemgetter

import pymongo as pm
from langchain.llms import OpenAI
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain.vectorstores import FAISS

from PyCmpltrtok.common import sep
from PyCmpltrtok.util_mongo import get_sorted_by_key, VALUE, KEY, get_history
from python_nlp.kg.neo4j.load_data.data2mongo import EMBED_TBL as EMBED_TBL_KG  # these embeddings do not match langchain's
from python_nlp.kg.neo4j.load_data.data2mongo_langchain import EMBED_TBL as EMBED_TBL_KG_LANGCHAIN
from python_nlp.embed.cblue_text2mongo import USERNAME, EMBED_COL
from xregexp import search_letters

K = 10  # number of similar sentences to retrieve per query

if '__main__' == __name__:
    # Connect to MongoDB.
    if 1:  # manual toggle: local MongoDB vs. WSL's Windows-host MongoDB
        IP = '127.0.0.1'
    else:
        IP = '172.20.240.1'  # IP of WSL's Windows host (changes on every WSL restart!)
    mongo = pm.MongoClient(IP, 27017, serverSelectionTimeoutMS=3000)
    mdb = mongo['CBLUE']
    print(mdb)
    get_history(mdb, 'u_try_it', limit=1)  # probe that MongoDB is actually reachable

    # Load the pre-computed embeddings from MongoDB.
    print('Loading from mongo ...', EMBED_TBL_KG_LANGCHAIN)
    xrows = get_sorted_by_key(mdb, EMBED_TBL_KG_LANGCHAIN, USERNAME)
    xlist = []       # embedding vectors
    xsentences = []  # the sentences the vectors embed
    xmd5s = []       # stable ids (md5 keys) for the vectorstore
    for i, xrow in enumerate(xrows):
        xembed = xrow.get(EMBED_COL, None)
        if not xembed:
            print('!', end='')  # row has no embedding: skip it
            continue
        xlist.append(xembed)
        xsentences.append(xrow[VALUE])
        xmd5s.append(xrow[KEY])
        print('.', end='')
        if i % 50 == 0:
            print(i)  # progress marker every 50 rows
    print()
    if not xlist:
        print('No data found!')
        sys.exit(1)  # fix: `sys` was used here without ever being imported

    sep('vectorstore')
    list_of_zip = list(zip(xsentences, xlist))
    vectorstore = FAISS.from_embeddings(list_of_zip, embedding=HuggingFaceEmbeddings(  # OpenAIEmbeddings, HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings
        # model_name='moka-ai/m3e-base'
        model_name=r'D:\_const\wsl\my_github\m3e-base'
    ), ids=xmd5s)

    sep('retriever')
    retriever = vectorstore.as_retriever(search_kwargs={
        'k': K,
    })

    sep('template')
    template = """根据下面的上下文回答问题：
{context}

问题: {question}"""
    prompt = PromptTemplate.from_template(template)

    sep('model')
    model = OpenAI(
        streaming=True,
        verbose=True,
        callbacks=[],
        openai_api_key="token1",
        openai_api_base=f"http://127.0.0.1:6001/v1",
        model_name="chatglm2-6b",
        temperature=0.0,
        openai_proxy=None,
        top_p=1.0,
        max_tokens=2048,
    )

    sep('chain')
    chain = (
            {"context": retriever, "question": RunnablePassthrough()}
            | prompt
            | model
            | StrOutputParser()
    )
    chain_manual_rag = prompt | model

    ALL_LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

    sep('template2')
    template2 = """下面的选项和问题无关的是哪几个？只回答选项字母组合。
问题: {question}
选项: 
{options}
"""
    prompt2 = PromptTemplate.from_template(template2)

    sep('chain2')
    chain2 = prompt2 | model

    while True:
        print('Input: ')
        xinput = input().strip()

        # Proactively fetch the sentences most similar to the question.
        sep('Similar sentences')
        docs = retriever.get_relevant_documents(xinput)
        print(docs)
        for i, doc in enumerate(docs):
            print(i, ALL_LETTERS[i], doc.page_content)

        # Ask the LLM directly, without RAG, for comparison.
        sep('Answer Directly')
        print(model.invoke(xinput))

        # Standard RAG answer.
        sep('Answer under Context')
        print(chain.invoke(xinput))

        # Ask the LLM which retrieved options are irrelevant to the question.
        sep('Dissimilarity analysis')
        options = ''.join(
            ALL_LETTERS[i] + ': ' + doc.page_content + '\n\n'
            for i, doc in enumerate(docs)
        )
        letters_str = chain2.invoke({
            'question': xinput,
            'options': options,
        })
        print(letters_str)

        # RAG again, keeping only the options the LLM judged relevant.
        sep('RAG based on Positive context')
        xletters = sorted(search_letters(letters_str))  # letters named as irrelevant
        print('Negative:', xletters)
        xletters_set = set(ALL_LETTERS[:K]) - set(xletters)
        print('Positive:', sorted(xletters_set))
        context = ''.join(
            doc.page_content + '\n\n'
            for i, doc in enumerate(docs)
            if ALL_LETTERS[i] in xletters_set
        )
        print(f'|{context}|')
        xres = chain_manual_rag.invoke({
            'context': context,
            'question': xinput,
        })
        print(xres)
