import os
# Allow duplicate OpenMP runtimes to coexist — works around the "OMP: Error #15"
# abort that occurs when several libraries (e.g. torch + faiss) each bundle
# their own libomp/libiomp. Must be set before those libraries are imported.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"


from langchain.schema import HumanMessage,AIMessage
from gptcache.adapter.langchain_models import LangChainChat




# Zhipu AI (BigModel) configuration.
# NOTE(review): the API key is hard-coded in source — move it to an environment
# variable or secret store before committing/deploying this file.
API_KEY = '794380a4cee054a0f96bb2844b41fd12.X4t70kph1CfmoKfT'
BASE_PATH ='https://open.bigmodel.cn/api/paas/v4/'
MODEL_NAME='glm-4'
# LangChain model initialization: ChatOpenAI is pointed at BigModel's
# OpenAI-compatible endpoint via base_url.
from langchain_community.chat_models import ChatOpenAI
llm =  ChatOpenAI(model_name=MODEL_NAME,temperature=.7,openai_api_key=API_KEY,base_url=BASE_PATH)

# gptcache配置
from gptcache.adapter.api import init_similar_cache
def get_msg(data, **_):
    """GPTCache pre-processing hook: extract the text of the last chat message.

    Args:
        data: request payload; ``data["messages"]`` is a non-empty sequence of
            message objects, each exposing a ``.content`` attribute.

    Returns:
        The ``content`` of the last message, used as the cache lookup key.
    """
    content = data.get("messages")[-1].content
    print(f'====get_msg缓存:{content}==\n')  # trace which key is used for caching
    # Reuse the computed value instead of re-indexing the message list.
    return content

def get_last_content(data,**_):
    """GPTCache pre-processing hook for the LLM adapter.

    Args:
        data: request payload; ``data['prompt']`` is a JSON-serialized list of
            message dicts of the form ``{"kwargs": {"content": ...}}``.

    Returns:
        The ``content`` of the last serialized message, used as the cache key.
    """
    import json
    prompt = data.get('prompt')
    messages = json.loads(prompt)
    content = messages[-1]['kwargs']['content']
    print(f'====问题：{content}====\n')  # trace the question being cached
    # Reuse the computed value instead of re-indexing the parsed structure.
    return content

from gptcache.similarity_evaluation import SbertCrossencoderEvaluation,SearchDistanceEvaluation
# def init_gpt_similar_cache():
#     init_similar_cache(
#         pre_func=get_msg,
#         evaluation=SbertCrossencoderEvaluation(),
#     )
from gptcache.embedding import Huggingface
def init_gpt_similar_cache():
    """Initialise the global similarity cache for the chat-model path.

    Cache keys come from the last chat message (``get_msg``); candidates are
    embedded with a Huggingface model and re-ranked with a cross-encoder
    before a hit is accepted.
    """
    hf_embedding = Huggingface()
    init_similar_cache(
        embedding=hf_embedding,
        pre_func=get_msg,
        evaluation=SbertCrossencoderEvaluation(),
    )

from gptcache.processor.pre import get_prompt
def init_gpt_similar_cache1():
    """Initialise the global similarity cache for the LLM (prompt-string) path.

    Same embedding/evaluation setup as ``init_gpt_similar_cache`` but keys the
    cache on the raw prompt via gptcache's builtin ``get_prompt`` pre-processor.
    """
    hf_embedding = Huggingface()
    init_similar_cache(
        embedding=hf_embedding,
        pre_func=get_prompt,
        evaluation=SbertCrossencoderEvaluation(),
    )


def get_hashed_name(name):
    """Return the hex SHA-256 digest of *name* (used to build per-model cache dirs)."""
    import hashlib
    digest = hashlib.sha256(name.encode())
    return digest.hexdigest()
from gptcache import Cache
# def init_gpt_test_cache(cache_obj:Cache,llm:str):
#     name = get_hashed_name(llm)
#     init_similar_cache(
#         cache_obj=cache_obj,
#         pre_func=get_last_content,
#         evaluation=SbertCrossencoderEvaluation(),
#         data_dir=f'@sys_cache/test_cache_{name}'
#     )
def my_post_func(data):
    """Identity post-processing hook: log the payload GPTCache hands back and
    return it unchanged."""
    value = data
    print('data====', value)
    return value
def init_gpt_test_cache(cache_obj:Cache,llm:str):
    """Initialise *cache_obj* as a similarity cache (used as a GPTCache init
    callback for LangChain's ``set_llm_cache``).

    The on-disk data directory is derived from a SHA-256 hash of the llm
    identifier so each model gets its own isolated cache.
    """
    hashed = get_hashed_name(llm)
    init_similar_cache(
        cache_obj=cache_obj,
        embedding=Huggingface(),
        pre_func=get_last_content,
        evaluation=SbertCrossencoderEvaluation(),
        data_dir=f'@sys_cache/test_cache_{hashed}',
    )
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough

# 封装物品信息模板
def init_goods_info_prompt(llm=None):
    """Build the chat prompt template for the "item info" assistant.

    Args:
        llm: unused; kept for backward compatibility with callers that pass
            the model (a ``prompt | llm`` chain used to be built here).

    Returns:
        A ChatPromptTemplate with a fixed system instruction and a single
        ``{input}`` user slot.
    """
    systemPrompt = """您是用户的智能管家，你对中文的理解非常棒，用户会给出一个物品名称，你需要根据用户给出的物品名称，先判断这是不是一个物品，如果是物品就返回这个物品的用途和使用方法，按如下格式给到用户：
    \n*** 用途 ***\n
    1. ....\n
    2. ....\n
    ....\n
    \n*** 使用方法***\n
    1. ....\n
    2. ....\n
    ....\n

    ，无需其他的文字做末尾总结。如果不是一个物品的名称返回“无”，无需其他的文字做末尾总结或提示用户”"""
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system",systemPrompt),
            ("user","{input}")
        ]
    )
    return prompt

def run_chat_model():
    """Demo: run repeated questions through the GPTCache-wrapped chat model.

    Repeats the same questions several times so later iterations should hit
    the similarity cache; per-question latency is printed to show the effect.
    """
    init_gpt_similar_cache()

    chat = LangChainChat(chat=llm)

    history = []
    question=['马铃薯','番茄','土豆','马铃薯','番茄','土豆','马铃薯','番茄','土豆','马铃薯','番茄','土豆']
    import time
    # enumerate instead of range(len(...)): same order, 1-based display index.
    for idx, q in enumerate(question, start=1):
        start = time.time()
        history.append(HumanMessage(q))
        r = chat(messages=history).content
        print(f'===Q{idx}:{q}======\n{r}')
        print(f'时间{idx}：{time.time()-start} s')
        history.append(AIMessage(r))

from gptcache.adapter.langchain_models import LangChainLLMs
def run_llm_model():
    """Demo: run repeated questions through GPTCache's LangChainLLMs adapter.

    Uses the prompt-string cache key (``get_prompt``) and prints per-question
    latency so cache hits on repeated questions are visible.
    """
    init_gpt_similar_cache1()
    prompt = init_goods_info_prompt()
    chat = LangChainLLMs(llm=llm)

    history = []
    question=['马铃薯','番茄','土豆','马铃薯','番茄','土豆','马铃薯','番茄','土豆','马铃薯','番茄','土豆']
    import time
    # enumerate instead of range(len(...)): same order, 1-based display index.
    for idx, q in enumerate(question, start=1):
        start = time.time()
        history.append(q)
        # NOTE(review): the *template object* is passed as `prompt` and the raw
        # history list as `input`; confirm this matches the gptcache
        # LangChainLLMs call signature before relying on this path.
        r = chat(prompt=prompt,input=history).content
        print(f'===Q{idx}:{q}======\n{r}')
        print(f'时间{idx}：{time.time()-start} s')
        history.append(AIMessage(r))


def run_chain_model():
    """Demo: cache LangChain calls globally via ``set_llm_cache`` + GPTCache."""
    from langchain.globals import set_llm_cache
    from langchain_community.cache import GPTCache
    # Each underlying llm string gets its own cache dir via init_gpt_test_cache.
    set_llm_cache(GPTCache(init_gpt_test_cache))
    # NOTE(review): init_goods_info_prompt currently returns a bare
    # ChatPromptTemplate (its prompt|llm chain is commented out), so
    # `chain.invoke(...).content` below operates on the template, not an LLM
    # chain — confirm the intended return value before relying on this path.
    chain = init_goods_info_prompt(llm)
    history = []
    question=['马铃薯','番茄','土豆','马铃薯','番茄','土豆']
    # question=['Tell me joke','Tell me a joke','Tell me two jokes','Tell me joke','I am bob','what is my name']
    # question=['给我讲个笑话','请给我讲个笑话','给我讲两个笑话','给我讲个笑话','我叫韩十一','请问我叫什么']
    import time
    for i in range(len(question)):
        start = time.time()
        # NOTE(review): history is accumulated here but never passed to the
        # chain — appears to be leftover from the chat-model variant.
        history.append(HumanMessage(question[i]+'是什么')) 
        r = chain.invoke(question[i]).content
        print(f'===Q{i+1}:{question[i]}======\n{r}')
        print(f'时间{i+1}：{time.time()-start} s')
        history.append(AIMessage(r))    


if __name__ == "__main__":
    # Entry point: uncomment exactly one demo scenario to run.
    # run_chat_model()
    # run_chain_model()
    run_llm_model()