"""
当前使用的方案，gptcache无法在stream的时候进行使用

"""

# 系统库
import os
import re
import threading
import time

# 第三方库
from flask import  (current_app,Blueprint,Response)
from langchain.globals import set_llm_cache
from langchain_community.cache import GPTCache
from langchain.schema import HumanMessage,AIMessage
from langchain_core.messages import AIMessageChunk

# 自定义库
from common import config
from utils.langchain_about import init_model,get_session_history,set_chain,get_result
from utils.api_cache import init_gpt_exact_cache
from utils.message import send_json_msg
from utils.do_db import verify_visit
from utils.do_http_res import get_ai_msg_parmams,stream_output,no_stream_output
from utils.upload_about import get_var_from_file
from utils.my_callback_handler import MyCallbackHandler

api_bp = Blueprint('api', __name__)

# Define the large language models; the default LLM is MoonShot
# (per the key selected by config['Current']['AI_KEY']).
current_key = config['Current']['AI_KEY']
llm = init_model(current_key)
# Separate streaming-enabled instance used by the SSE endpoints.
stream_llm = init_model(current_key,streaming=True)



# Enable an exact-match LLM cache (GPTCache) for all LangChain calls.
# NOTE (see module docstring): this cache is not hit when streaming.
set_llm_cache(GPTCache(init_gpt_exact_cache))


# *********************S 接口区*******************************
#  接口 获取历史记录 get /get_history
@api_bp.route('/get_history', methods=['GET'])
def get_history():
    """Return the current user's chat history as a JSON list.

    Each entry is ``{"role": ..., "content": ...}``. For an AI message whose
    id appears in memory/history.json, the preceding user entry of the form
    "[file1,file2]text" is re-expanded into one image entry per file followed
    by the plain text content.
    """
    user_id = current_app.user['user_id']
    history = get_session_history(user_id)
    # Hoisted out of the loop: the file content does not change per message.
    session = get_var_from_file('memory/history.json')
    res = []
    for h in history.messages:
        if isinstance(h, HumanMessage):
            res.append({"role": 'user', "content": h.content})
        elif isinstance(h, (AIMessage, AIMessageChunk)):
            # If this AI answer has an associated file message, split the
            # previous user entry into image entries plus the text content.
            if session and session.get(h.id):
                message = res.pop()
                match = re.search(r'^\[(.*)\](.*)', message['content'])
                # NOTE(review): if the regex does not match, the popped user
                # message is silently dropped — preserved from the original,
                # confirm this is intended.
                if match:
                    content = match.group(2)
                    # Filter empty names so "[]text" does not emit the bare
                    # upload folder as an image entry.
                    files = [f for f in match.group(1).split(',') if f]
                    for x in files:
                        res.append({
                            "role": "user",
                            "content": os.path.join(config['upload']['UPLOAD_FOLDER'], x),
                            "type": 'img',
                        })
                    res.append({"role": "user", "content": content})
            res.append({"role": "assistant", "content": h.content})
        else:
            res.append({"role": "system", "content": h.content})
    return send_json_msg(200, '', res)

#  接口 获取ai的回答 post /get_ai_msg
#  参数 {type:goods_info | chat , msg : 不带文件的消息必传}
"""
llm.stream方法发送请求，无法从缓存获取到数据
llm.invoke 方法发送请求获取到数据慢，但是能从缓存中获取到数据
"""
@api_bp.route('/get_ai_msg', methods=['POST'])
def get_ai_msg():
    """Return the AI's answer (non-cached path).

    Expects POST params {type: goods_info | chat, msg: required for
    non-file messages}. Responds 201 on bad params, 402 when the user's
    level/visit quota is exhausted, otherwise a streamed or plain answer
    depending on the ``isStream`` flag.
    """
    p = get_ai_msg_parmams()
    if p is None:
        return send_json_msg(201)
    user_id = current_app.user['user_id']
    level_id = current_app.user['level_id']
    # Check whether access to ai_chat / goods_info is rate-limited.
    _type = p.get('type')
    name = _type if _type == 'goods_info' else 'ai_chat'
    if verify_visit(user_id, level_id, name):
        return send_json_msg(402, '访问受限')
    is_stream = p.get('isStream')
    res = set_chain(llm, p)
    if not res.get('chain'):
        # Bug fix: original was `send_json_msg(res['status' or 201,res['err']])`,
        # which subscripted the dict with a tuple (KeyError) and never returned.
        return send_json_msg(res.get('status') or 201, res.get('err'))
    chain = res['chain']
    p['msg'] = res['msg']
    p['is_file_msg'] = res['is_file_msg']
    print(f'问题是：{p.get("msg")}')
    # Renamed from `config` to avoid shadowing the imported config module.
    run_config = {"configurable": {"session_id": user_id}}
    if is_stream:
        return stream_output(chain, p, run_config, user_id)
    return no_stream_output(chain, p, run_config, user_id)


"""
llm.invoke 方法发送请求能从缓存中获取到数据并可以以流的形式返回给到情断
"""
#  接口 获取ai的回答 可用缓存 post /get_ai_msg_with_cache
@api_bp.route('/get_ai_msg_with_cache', methods=['POST'])
def get_ai_msg_with_cache():
    """Return the AI's answer with exact-match cache support.

    Same contract as /get_ai_msg, but routes through llm.invoke (via
    get_result) so cached answers are found; streaming replies are relayed
    to the client through MyCallbackHandler's token generator.
    """
    start_time = time.time()
    p = get_ai_msg_parmams()
    if p is None:
        return send_json_msg(201)
    user_id = current_app.user['user_id']
    level_id = current_app.user['level_id']
    # Check whether access to ai_chat / goods_info is rate-limited.
    # Level semantics (from original comment): 1 = new user, unrestricted;
    # 2 = normal user, limited; 3 = member, day-limited; 4/5 = unlimited.
    _type = p.get('type')
    _is_goods_info = _type == 'goods_info'
    name = _type if _is_goods_info else 'ai_chat'
    if verify_visit(user_id, level_id, name):
        return send_json_msg(402, '访问受限')
    is_stream = p.get('isStream')
    model = stream_llm if is_stream else llm
    res = set_chain(model, p)
    chain = res.get('chain')
    if not chain:
        # Bug fix: added the missing `return`; use .get so a missing
        # 'status' key falls back to 201 instead of raising KeyError.
        return send_json_msg(res.get('status') or 201, res.get('err'))
    p['msg'] = res['msg']
    is_file_msg = p['is_file_msg'] = res['is_file_msg']
    callback = MyCallbackHandler(is_stream, is_file_msg, user_id, _type)
    # goods_info requests do not record history, so no `configurable` entry.
    # Renamed from `config` to avoid shadowing the imported config module.
    run_config = {"callbacks": [callback]}
    if not _is_goods_info:
        run_config["configurable"] = {"session_id": user_id}
    chain = chain.with_config(run_config)
    if is_stream:
        # Run the model in a worker thread; tokens reach the client through
        # the callback handler's generator as server-sent events.
        thread = threading.Thread(target=get_result, args=(chain, p['msg']))
        thread.start()
        print(f"Execution time: {time.time() - start_time}s")
        return Response(callback.generate_token(), mimetype="text/event-stream")
    r = get_result(chain, p['msg'])
    return send_json_msg(200, '', r.content)


# *********************E 接口区*******************************