'''
    gptcache exact-match caching combined with multi-turn contextual Q&A.
'''
# Workaround: avoid crashes when multiple OpenMP runtime DLLs are loaded at once
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"


# Zhipu AI configuration (OpenAI-compatible endpoint)
# NOTE(review): API key is hardcoded — rotate it and load from an environment
# variable / secrets store before committing or sharing this file.
API_KEY = '794380a4cee054a0f96bb2844b41fd12.X4t70kph1CfmoKfT'
BASE_PATH ='https://open.bigmodel.cn/api/paas/v4/'
MODEL_NAME='glm-4'
from gptcache.adapter.api import init_similar_cache  # NOTE(review): imported but unused in this file
# LangChain chat-model initialisation against the Zhipu endpoint
from langchain_openai import ChatOpenAI
llm =  ChatOpenAI(model_name=MODEL_NAME,temperature=.7,openai_api_key=API_KEY,base_url=BASE_PATH)

from gptcache.embedding import Huggingface
# gptcache 初始化
import hashlib
def get_hashed_name(name):
    """Return the hex SHA-256 digest of *name* (used to build a per-model cache directory)."""
    digest = hashlib.sha256(name.encode("utf-8"))
    return digest.hexdigest()

from gptcache import Cache
from gptcache.manager.factory import manager_factory
from gptcache.processor.pre import get_prompt,last_content
from langchain_community.cache import GPTCache
import time

from typing import Dict,Any
import json


# 精准匹配
def init_gptcache_exact(cache_obj: Cache, llm: str):
    """Initialise *cache_obj* for exact-match lookups.

    Uses a plain "map" data manager (no embeddings), so only byte-identical
    prompts produce cache hits. The data directory is keyed by a hash of the
    LLM identifier so each model gets its own cache.
    """
    cache_dir = f"@sys_cache/map_cache_{get_hashed_name(llm)}"
    cache_obj.init(
        pre_embedding_func=get_prompt,
        data_manager=manager_factory(manager="map", data_dir=cache_dir),
    )


# Register the exact-match GPTCache as LangChain's process-wide LLM cache
from langchain.globals import set_llm_cache
set_llm_cache(GPTCache(init_gptcache_exact))


# Test prompts; with exact-match caching only byte-identical prompts hit the cache.
question=['Tell me joke','Tell me a joke','Tell me two jokes','Tell me joke','I am bob','what is my name']
# question=['给我讲个笑话','请给我讲个笑话','给我讲两个笑话','我是张三','请问我叫什么名字']
# question=['你好我是amy','我喜欢吃西红柿我能做什么菜','还记得我叫什么吗','我再问你，我喜欢吃西红柿我能做什么菜']
from langchain.schema import HumanMessage,AIMessage

# Full conversation transcript, re-sent every turn so the model keeps context.
history=[]


# enumerate(..., start=1) replaces the range(len(...)) anti-pattern; the 1-based
# index reproduces the original's {i+1} numbering exactly.
for n, q in enumerate(question, start=1):
    start = time.time()
    history.append(HumanMessage(q))
    # Invoke with the whole history so context-dependent questions
    # ("what is my name") can be answered.
    r = llm.invoke(history).content
    print(f'===Q{n}:{q}======\n{r}')
    print(f'时间{n}：{time.time()-start} s')
    history.append(AIMessage(r))
    # history=[]

# print(history)