import os

# Allow duplicate OpenMP runtimes to coexist (workaround for faiss/torch
# OpenMP conflicts on some platforms; must be set before those libs load).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


# Zhipu AI (bigmodel.cn) configuration.
# SECURITY: an API key was hard-coded here and must be treated as compromised —
# rotate it.  Prefer supplying the key via the ZHIPUAI_API_KEY environment
# variable; the embedded literal is kept only as a backward-compatible fallback.
API_KEY = os.environ.get('ZHIPUAI_API_KEY', '794380a4cee054a0f96bb2844b41fd12.X4t70kph1CfmoKfT')
BASE_PATH = 'https://open.bigmodel.cn/api/paas/v4/'
MODEL_NAME = 'glm-4'

# LangChain model initialization: Zhipu exposes an OpenAI-compatible endpoint,
# so ChatOpenAI works with a custom base_url.
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name=MODEL_NAME, temperature=.7, openai_api_key=API_KEY, base_url=BASE_PATH)

# gptcache 初始化
from gptcache import Cache
from langchain_community.cache import GPTCache
from gptcache.manager.factory import get_data_manager,CacheBase, VectorBase
from gptcache.adapter.api import init_similar_cache
from gptcache.processor.pre import last_content, get_prompt
from gptcache.embedding import Huggingface
from gptcache import Config
from gptcache.processor.post import first

import time
import hashlib
def get_hashed_name(name):
    """Return the hex SHA-256 digest of *name*, used as a stable cache-dir suffix."""
    digest = hashlib.sha256(name.encode())
    return digest.hexdigest()
# Similarity matching: semantically close prompts should hit the same cache entry.
def init_gptcache_similar(cache_obj: Cache, llm: str):
    """Configure *cache_obj* for similarity-based caching of LLM responses.

    Passed to langchain's GPTCache wrapper, which invokes it once per LLM
    identifier string.  Embeddings come from a Chinese BERT model, vectors are
    stored in FAISS with metadata in SQLite, and candidate hits are re-scored
    with a cross-encoder before being accepted (threshold 0.9).
    """
    from gptcache.similarity_evaluation import SbertCrossencoderEvaluation

    llm_hash = get_hashed_name(llm)
    encoder = Huggingface(model='google-bert/bert-base-chinese')
    manager = get_data_manager(
        CacheBase('sqlite'),
        VectorBase('faiss', dimension=encoder.dimension, max_size=100000),
    )
    # Embedding the timestamp gives every run a fresh cache directory
    # (presumably deliberate for these timing experiments — no cross-run reuse).
    stamp = int(time.time())
    init_similar_cache(
        cache_obj=cache_obj,
        embedding=encoder,
        pre_func=last_content,
        evaluation=SbertCrossencoderEvaluation(),
        data_manager=manager,
        data_dir=f"./@sys_cache/test_cache_{stamp}_{llm_hash}",
        post_func=first,
        config=Config(
            similarity_threshold=0.9,
        ),
    )





from langchain.globals import set_llm_cache
# Route all LLM calls through the similarity cache configured above.
set_llm_cache(GPTCache(init_gptcache_similar))

# Probe questions: the near-duplicate jokes should be answered from the cache
# (much faster), while the name questions exercise multi-turn history.
# question=['Tell me joke','Tell me two jokes','Tell me a joke','I am bob','what is my name']
question=['给我讲个笑话','请给我讲个笑话','给我讲两个笑话','我是张三','请问我叫什么名字']
from langchain.schema import SystemMessage, HumanMessage, AIMessage

history = [SystemMessage('你是智能助手，将回答用户的问题')]


for i, q in enumerate(question):
    start = time.time()
    history.append(HumanMessage(q))
    answer = llm.invoke(history).content
    print(f'===Q{i+1}:{q}======\n{answer}')
    print(f'时间{i+1}：{time.time()-start} s')
    # Sliding window over the conversation: drop the OLDEST question/answer
    # pair while keeping the system prompt.  (Bug fix: the original called
    # history.pop() here, which removed the question that was just asked, so
    # every stored answer lost its prompt.)
    if len(history) > 3:
        del history[1:3]
    history.append(AIMessage(answer))
