"""
该文件旨在流式输出是从缓存中获取数据，
但是目前的测试会降低返回速度，并且有的明明已经返回200状态码，
但是前端无法接收到信息，还有超时的风险
而且无法记录历史数据

"""

# 系统文件
from flask import  (jsonify,current_app,request,Blueprint,Response)
import os

api_test_bp = Blueprint('api_test', __name__)


import threading


from utils.langchain_about import init_model

from langchain_core.runnables.history import RunnableWithMessageHistory

# 定义大语言模型 默认LLM 是MoonShot
llm = init_model('MoonShot')
stream_llm = init_model('MoonShot',streaming=True)

# LLM 是智普清言
# llm_zhipu = init_model('ZhiPuAI')

generate_data=None

# **********************开启缓存***************************
from langchain.globals import set_llm_cache
from langchain_community.cache import GPTCache
from utils.api_cache import init_gpt_exact_cache
# 使用精确匹配缓存
set_llm_cache(GPTCache(init_gpt_exact_cache))


# 日志文件
from utils.log import logger
from common import config
# 错误消息
from utils.message import send_json_msg


from langchain.schema import HumanMessage,AIMessage
from langchain_core.messages import AIMessageChunk
from utils.do_db import verify_visit,set_today_visit


# *********************S 接口区*******************************
# 测试
@api_test_bp.route('/test', methods=['POST'])
def test():
    """Smoke-test endpoint: ask the default LLM for a joke.

    Returns a 200 JSON message with the model's reply, or a 500 JSON
    message with the error text from set_chain.
    """
    res = set_chain(llm, {"msg": 'tell me a joke'})
    chain = res.get('chain')
    if chain is None:
        return send_json_msg(500, '', res.get('msg'))
    # Named run_config (not `config`) so it does not shadow the
    # module-level `from common import config` import.
    run_config = {"configurable": {"session_id": 50}}
    reply = chain.invoke({"input": 'tell me a joke'}, run_config)
    return send_json_msg(200, '', reply.content)
#  接口 获取ai的回答 post /get_ai_msg
#  参数 {type:goods_info | chat , msg : 不带文件的消息必传}
#  接口 获取ai的回答 post /get_ai_msg
#  参数 {type:goods_info | chat , msg : 不带文件的消息必传}
@api_test_bp.route('/test_get_ai_msg', methods=['POST'])
def get_ai_msg():
    """Answer a user message, either as a plain JSON reply or an SSE stream.

    Request params (JSON or multipart form, see _get_ai_msg_parmams):
      type: 'goods_info' | 'chat', msg: the question, isStream: stream flag.
    Streaming runs chain.invoke on a worker thread and streams tokens back
    through a MyCallbackHandler queue.
    """
    p = _get_ai_msg_parmams()
    if p is None:
        return send_json_msg(201)
    user_id = current_app.user['user_id']
    _is_goods_info = p.get('type') == 'goods_info'
    # Visit limiting via verify_visit(user_id, ...) is currently disabled.
    is_limit = False
    if is_limit:
        return send_json_msg(402, '访问受限')

    is_stream = p.get('isStream')
    model = stream_llm if is_stream else llm
    res = set_chain(model, p)
    if not res.get('chain'):
        # BUGFIX: was `res['status' or 201, res['err']]`, which indexed res
        # with a tuple (KeyError) and never returned the response.
        return send_json_msg(res.get('status', 201), res.get('err'))

    chain = res['chain']
    p['msg'] = res['msg']
    p['is_file_msg'] = res['is_file_msg']
    callback = MyCallbackHandler(is_stream)
    # Named run_config so it does not shadow the module-level `config` import.
    if is_stream:
        run_config = {
            "configurable": {"session_id": user_id},
            "callbacks": [callback],
        }

        def _invoke(chain, p):
            # Runs on a worker thread; tokens flow back via `callback`.
            print('执行stream:\n')
            return chain.invoke({"input": p['msg']}, run_config)

        threading.Thread(target=_invoke, args=(chain, p)).start()
        return Response(callback.generate_token(), mimetype="text/event-stream")

    run_config = None if _is_goods_info else {"configurable": {"session_id": user_id}}
    r = chain.invoke({"input": p['msg']}, run_config)
    # BUGFIX: the response was computed but never returned.
    return send_json_msg(200, '', r.content)

# *********************E 接口区*******************************

def cancel_after(duration, fn):
    """Run fn on a daemon thread, waiting at most `duration` seconds.

    The original implementation called the nonexistent Thread.cancel and
    referenced threading.Thread._local._error, so it raised AttributeError
    whenever fn failed. Python threads cannot be forcibly cancelled; the
    portable behavior is to join with a timeout and abandon the worker
    (daemon=True, so it cannot keep the process alive) if it overruns.
    """
    worker = threading.Thread(target=fn, daemon=True)
    worker.start()
    worker.join(duration)
    if worker.is_alive():
        # fn exceeded its time budget; stop waiting for it.
        print("取消执行")

def t(res):
    """Wrap a single value in a one-shot generator (used for SSE responses)."""
    yield from (res,)

# *********************S 函数区*******************************
class VariableMonitor:
    """Minimal observer pattern: callbacks fire whenever the value is set."""

    def __init__(self):
        # Registered callbacks, notified in registration order.
        self._observers = []
        self._value = None

    def add_observer(self, observer_func):
        """Register a callable invoked with each newly set value."""
        self._observers.append(observer_func)

    def set_value(self, value):
        """Store `value` and notify every registered observer."""
        self._value = value
        for notify in self._observers:
            notify(value)


def get_data_from_cache(token, is_stream):
    """Return a cached answer, either as an SSE stream or a JSON message.

    Streaming responses come back as a (Response, 200) tuple; plain
    responses are delegated to send_json_msg.
    """
    print(f'in get_data \n {token}' )
    if not is_stream:
        return send_json_msg(200, '', token)
    # Stream the single cached token through a one-shot generator.
    stream = Response(t(token), mimetype="text/event-stream")
    return stream, 200


    
from langchain.callbacks import StreamingStdOutCallbackHandler 
class MyCallbackHandler(StreamingStdOutCallbackHandler):
    """Bridges LLM callbacks to an SSE token generator.

    Tokens arrive from the LLM (possibly on a worker thread) and are queued;
    generate_token() drains the queue and is what Flask streams to the client.

    BUGFIX: `cache`, `end` and the mutable `tokens` list used to be
    class-level attributes shared by every instance, so concurrent requests
    leaked tokens into each other's streams; they are now per-instance.
    """

    def __init__(self, isStream):
        super().__init__()
        self.isStream = isStream
        self.cache = True    # stays True when no token callback fired (cache hit)
        self.end = False     # set once the run finishes or errors out
        self.tokens = []     # FIFO of pending tokens
        self._wake = threading.Event()  # avoids busy-waiting in generate_token

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        print('====new===\n', token)
        self.cache = False
        self.tokens.append(token)
        self._wake.set()

    def on_llm_end(self, response, **kwargs) -> None:
        print('====llm end===\n', response)
        if not self.tokens and self.cache:
            # A cache hit never triggers on_llm_new_token; emit the whole
            # generation text as a single token instead.
            self.tokens = [response.generations[0][0].text]
        self.end = True
        self._wake.set()

    def on_llm_error(self, error: Exception, **kwargs) -> None:
        print('====error===\n', error)
        # `error` can be any exception; only HTTP-style errors carry
        # a `.response` — guard the attribute access.
        response = getattr(error, 'response', None)
        if response is not None and getattr(response, 'status_code', None) == 429:
            print('err 429,请求过于频繁，请1s后重试')
        self.tokens.append(str(error))
        # BUGFIX: `end` was never set on error, so generate_token spun
        # forever and the request hung; terminate the stream instead.
        self.end = True
        self._wake.set()

    def generate_token(self):
        """Yield queued tokens; keeps draining leftovers after `end` is set."""
        while not self.end or self.tokens:
            if self.tokens:
                yield self.tokens.pop(0)
            else:
                # Sleep briefly instead of busy-spinning; woken by callbacks.
                self._wake.wait(0.05)
                self._wake.clear()





def _get_ai_msg_parmams():
    """Extract the chat parameters from the current Flask request.

    JSON bodies must carry a non-empty 'msg'. Form bodies may omit 'msg'
    when files are attached, in which case a default instruction is
    substituted. Returns the parameter dict, or None when unusable.
    """
    if request.is_json:
        body = request.json
        if 'msg' not in body or body['msg'] == '':
            return None
        return body

    params = request.form.to_dict()
    has_files = len(request.files) > 0
    if has_files:
        # NOTE(review): file format validation is still missing here.
        params['files'] = request.files
    if 'msg' not in request.form or request.form['msg'] == '':
        if not has_files:
            return None
        params['msg'] = "整理一下刚刚接收到的文件的内容"
    return params

def set_files_msg(is_file_msg, msg_id, user_id):
    """Persist the msg_id -> user_id mapping for file-based messages."""
    if not is_file_msg:
        return
    # Load the existing mapping (may be empty/missing), record, and save back.
    mapping = get_var_from_file('memory/history.json') or {}
    mapping[msg_id] = user_id
    save_var_to_file(mapping, 'memory/history.json')
from langchain_community.chat_message_histories import SQLChatMessageHistory
def get_session_history(session_id):
    """Chat-history store for one session, backed by the local SQLite db."""
    history = SQLChatMessageHistory(session_id, "sqlite:///history.db")
    return history


from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
# from langchain_core.runnables import RunnablePassthrough
from langchain.schema import SystemMessage
import re
from typing import Dict
def set_chain(llm,param:Dict):    
    """Build the runnable chain (prompt | llm) for a request.

    'goods_info' requests get a bare, stateless chain with a goods-specific
    system prompt. All other requests inject parsed file contents (if any)
    as system messages, keep the last 10 history messages, and wrap the
    chain with RunnableWithMessageHistory.

    Returns {'chain', 'msg', 'is_file_msg'} on success, or the error dict
    from set_files_prompt ({'err', 'status'}) when file parsing fails.
    """
    files = param.get('files')
    msg_type = param.get('type')
    msg = param.get('msg')
    files_prompt= []
    is_file_msg=False
    if msg_type!='goods_info':
        if files:
            tmp = set_files_prompt(files)
            if tmp.get('err'):
                # File parsing failed: propagate the error dict to the caller.
                return tmp
            else:
                # Files parsed: prepend the filenames to the message so the
                # question text reflects which files it refers to.
                files_prompt = tmp['files_prompt']
                filenames = tmp['filenames']
                msg = f'[{filenames}]{msg}'
                is_file_msg = True
    sys = set_goods_sys_prompt(msg) if msg_type=='goods_info' else "您是用户的智能管家，会尽可能在200字以内回答用户的问题,如果是文件消息无需告诉用户文件名和文件类型等信息"

    # NOTE(review): set_goods_sys_prompt returns None when msg does not match
    # its expected pattern — confirm goods_info callers always send one.
    messages = [
        (
                "system",
                sys ,
            ),
            ("human", "{input}"),
        ] if msg_type=='goods_info' else [
            *files_prompt,
            (
                "system",
                sys ,
            ),
            # Keep only the 10 most recent history messages in the prompt.
            MessagesPlaceholder(variable_name="history", n_messages=10),
            ("human", "{input}"),
        ]
       
    prompt = ChatPromptTemplate.from_messages(messages)
    chain = prompt | llm
    # goods_info queries are stateless; chat queries get per-session history.
    res = chain if msg_type=="goods_info" else RunnableWithMessageHistory(
            chain,
            get_session_history,
            input_messages_key="input",
            history_messages_key="history",
        )
    return {
        "chain":res,
        "msg":msg,
        "is_file_msg":is_file_msg
    }

def set_goods_sys_prompt(msg):
    """Build the system prompt for a goods-info query.

    Only messages of the form "<name>的用途和使用方法" are recognized;
    anything else yields None.
    """
    matched = re.match(r'^(.+)的用途和使用方法$', msg)
    if not matched:
        return None
    goods_name = matched.group(1)
    return f"""您是用户的智能管家，先判断{goods_name}是不是一个物品，如果不是回答“无”即可。如果是，按如下格式回答给用户：
        \n*** 用途 ***\n
            1. ....\n
            2. ....\n
            ....\n
            \n*** 使用方法***\n
            1. ....\n
            2. ....\n
            ....\n
        ，无需其他的文字做末尾总结。
        """
    
from utils.upload_about import moonshot_get_file_content,moonshot_get_file_obj,upload_file_to_local

import json
def set_files_prompt(files):
    """Turn uploaded files into SystemMessage prompt entries.

    Each file is first saved locally; its text is then taken from the local
    extraction cache or, on a miss, extracted via the Moonshot file API and
    cached for next time. Any single failure aborts the whole batch and
    removes the local copies.

    Returns {'files_prompt': [SystemMessage, ...], 'filenames': 'a,b'} on
    success; {'err': <message>, 'status': 201} on failure; or the upload
    error value from upload_file_to_local when the local save itself fails.
    """
    files_prompt =[]
    err_msg = None
    # One failing file aborts the whole batch.
    # Upload the files locally first to obtain their metadata.
    if files:
        # [{filename,finalname,open_path}]
        local_files_info = upload_file_to_local(files)
        _type = type(local_files_info)
        if _type==list:
            # Local upload succeeded.
            for item in local_files_info:
                name = item['finalname']
                exact_info= get_file_info(name)
                if exact_info:                    
                    # Cache hit: reuse the previously extracted content.
                    files_prompt.append(SystemMessage(exact_info['exact_content']))
                else:                     
                    # NOTE(review): this file handle is never explicitly closed.
                    file_obj_res = moonshot_get_file_obj(item,open(item['open_path'], 'rb'))
                    if file_obj_res.status_code==200:                        
                        file_obj = json.loads(file_obj_res.text)
                        file_content_res = moonshot_get_file_content(file_obj['id'])
                        if file_content_res.status_code==200:
                            file_content = file_content_res.text
                            files_prompt.append(SystemMessage(file_content))
                            # Cache the extracted content for future requests.
                            set_file_info(file_content,file_obj['id'],item['finalname'])
                        # Content extraction failed.
                        else:
                            print(f'抽取内容失败：{file_content_res.text}')
                            f_name = item['finalname']
                            err_msg=f'{f_name}文件解析失败:抽取内容失败'
                            remove_local_file(local_files_info)
                            break
                    # Creating the remote file object failed (bad format, etc.).
                    else:
                        # Remove the local copies before aborting.
                        remove_local_file(local_files_info)
                        f_name = item['finalname']
                        err_msg=f'{f_name}文件解析失败：抽取文件对象失败'
                        break
            if err_msg:
                return {"err":err_msg,"status":201}
            else:
                return {
                    "files_prompt":files_prompt,
                    "filenames":','.join([d['finalname'] for d in local_files_info])
                }
        else:
            # Local upload failed: pass the error value straight through.
            return local_files_info
    else:
        return {"err":"没有文件需要解析","status":201}

   

def remove_local_file(files_info):
    """Delete every locally-saved upload referenced by the info dicts."""
    for info in files_info:
        os.remove(info['open_path'])
def get_file_info(filename):
    """Look up a cached file-extraction record by filename, or None."""
    records = get_var_from_file('memory/file_config.json')
    if not records:
        return None
    matches = [rec for rec in records if rec['filename'] == filename]
    return matches[0] if matches else None
    
from utils.upload_about import get_var_from_file,save_var_to_file
def set_file_info(exact_content, file_id, filename):
    """Append one file-extraction record to the on-disk cache file."""
    record = {
        'exact_content': exact_content,
        'file_id': file_id,
        'filename': filename,
    }
    records = get_var_from_file('memory/file_config.json')
    if records:
        records.append(record)
    else:
        records = [record]
    save_var_to_file(records, 'memory/file_config.json')


   