import re
from flask import Flask, request, jsonify

# Credentials passed via environment variables (global scope, lowest precedence)
import os
import qianfan
import yaml
import json
from sparkai.llm.llm import ChatSparkLLM
from sparkai.core.messages import ChatMessage
import dashscope
from dashscope import Generation
from volcenginesdkarkruntime import Ark
from zhipuai import ZhipuAI
import logging
# import mistune
import mdtex2html
from waitress import serve

# Load all credentials/settings from the YAML config file (path is relative
# to the process working directory — assumed to be the app's source dir).
with open("../config/config.yaml") as file:
    config = yaml.safe_load(file)  # returns a plain Python dict, accessed directly below

def init_env(config):
    """Export Baidu Qianfan credentials into the process environment.

    The qianfan SDK reads QIANFAN_ACCESS_KEY / QIANFAN_SECRET_KEY from the
    environment when ``qianfan.ChatCompletion()`` is constructed.

    Raises:
        KeyError: if either key is missing from *config*.
    """
    for env_name in ("QIANFAN_ACCESS_KEY", "QIANFAN_SECRET_KEY"):
        os.environ[env_name] = config[env_name]
def init_logger():
    """Attach a timestamped file handler to the Flask app logger.

    Writes records to /var/log/flaskapp.log; requires write permission on
    that path.  NOTE(review): not called anywhere in this module — confirm
    whether it should be invoked at startup.
    """
    handler = logging.FileHandler('/var/log/flaskapp.log')
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
    )
    app.logger.addHandler(handler)

init_env(config=config)
# WebSocket endpoint for the iFlytek Spark Max model; other model versions
# use different URLs — see https://www.xfyun.cn/doc/spark/Web.html
SPARKAI_URL = "wss://spark-api.xf-yun.com/v3.5/chat"
# Spark credentials, obtained from the iFlytek console
# (https://console.xfyun.cn/services/bm35)
SPARKAI_APP_ID = config["SPARKAI_APP_ID"]
SPARKAI_API_SECRET = config["SPARKAI_API_SECRET"]
SPARKAI_API_KEY = config["SPARKAI_API_KEY"]
# Spark Max "domain" value; other model versions use different values —
# see https://www.xfyun.cn/doc/spark/Web.html
SPARKAI_DOMAIN = config["SPARKAI_DOMAIN"]
dashscope.api_key = config["DASHSCOPE_API_KEY"]
# Volcengine (Doubao) inference endpoint id and API base URL
VOLC_ENDPOINT = config["VOLC_ENDPOINT"]
VOLC_BASE_URL = config["VOLC_BASE_URL"]
doubao_client = Ark(
    base_url=VOLC_BASE_URL,ak=config["VOLC_ACCESSKEY"],sk=config["VOLC_SECRETKEY"]
)
ZHIPUAI_API_KEY = config["ZHIPUAI_API_KEY"]
# Non-streaming Spark chat client
spark = ChatSparkLLM(
    spark_api_url=SPARKAI_URL,
    spark_app_id=SPARKAI_APP_ID,
    spark_api_key=SPARKAI_API_KEY,
    spark_api_secret=SPARKAI_API_SECRET,
    spark_llm_domain=SPARKAI_DOMAIN,
    streaming=False,
)
# Qianfan client picks up the credentials exported by init_env above
chat_comp = qianfan.ChatCompletion()
glm_client = ZhipuAI(api_key=ZHIPUAI_API_KEY) 

app = Flask(__name__)
# Allow non-ASCII (e.g. Chinese) characters to pass through jsonify unescaped
app.config["JSON_AS_ASCII"] = False
# Per-model conversation histories, held as module-level globals.  They are
# mutated without locking — safe only because the server is started with a
# single worker thread (see serve(..., threads=1) at the bottom of the file).
yiyan_messages = []
qwen_messages = [{"role": "system", "content": "You are a helpful assistant."}]
spark_messages = [
    ChatMessage(
        role="user",
        content="You are a helpful assistant.",
    )
]
doubao_messages = [
    {"role": "system", "content": "你是豆包，是由字节跳动开发的 AI 人工智能助手"}
]
glm_messages = []

def get_response(messages):
    """Send *messages* to DashScope's qwen-long model and return the raw response.

    Args:
        messages: chat history as a list of {"role", "content"} dicts.

    Returns:
        The DashScope ``Generation.call`` response object; callers read
        ``response.output.choices[0]["message"]["content"]``.
    """
    return Generation.call(
        model="qwen-long",
        messages=messages,
        result_format="message",  # request chat-message formatted output
    )


def init_messages_all():
    """Reset the conversation history of every supported model."""
    model_names = ("yiyan", "qwen", "spark", "doubao", "glm")
    for name in model_names:
        init_messages(name)


def init_messages(messages_name):
    """Reset one model's conversation history global to its initial state.

    Args:
        messages_name: one of "yiyan", "qwen", "spark", "doubao", "glm".
            Unknown names are silently ignored (no history is touched).
    """
    # Factory per model so each reset produces a fresh list (never shared).
    initial_history = {
        "yiyan": lambda: [],
        "qwen": lambda: [
            {"role": "system", "content": "You are a helpful assistant."}
        ],
        "spark": lambda: [
            ChatMessage(role="user", content="You are a helpful assistant.")
        ],
        "doubao": lambda: [
            {
                "role": "system",
                "content": "你是豆包，是由字节跳动开发的 AI 人工智能助手",
            }
        ],
        "glm": lambda: [],
    }
    factory = initial_history.get(messages_name)
    if factory is not None:
        # Rebind the module-level <name>_messages global, exactly as the
        # explicit `global` assignments would.
        globals()[messages_name + "_messages"] = factory()

@app.route("/api/chat_message", methods=["GET", "POST"])
def chat_message():
    """Dispatch a user question to the selected LLM and return its reply.

    Expects a JSON body with keys:
        model: one of "glm", "yiyan", "qwen", "spark", "doubao".
        ques:  the user's question text.

    Appends both the question and the model's answer to that model's
    module-level history (so follow-up questions keep context), then returns
    JSON with the answer rendered to HTML ("data") and as raw markdown
    ("markdown").  An unknown model falls through with an empty response.
    """
    if not request.data:
        # Consistent with chat_clear: log rather than print to stdout.
        app.logger.info("no data")
    data = request.data.decode("utf-8")
    data_json = json.loads(data)
    model = data_json["model"]
    ques = data_json["ques"]

    resp = ""
    if model == "glm":
        glm_messages.append({"role": "user", "content": ques})
        resp = (
            glm_client.chat.completions.create(
                model=config["GLM_MODEL"],  # model code to invoke
                messages=glm_messages,
            )
            .choices[0]
            .message.content
        )
        glm_messages.append({"role": "assistant", "content": resp})
    elif model == "yiyan":
        yiyan_messages.append({"role": "user", "content": ques})
        resp = chat_comp.do(model=config['QIANFAN_MODEL'], messages=yiyan_messages)[
            "body"
        ]["result"]
        yiyan_messages.append({"role": "assistant", "content": resp})
    elif model == "qwen":
        qwen_messages.append({"role": "user", "content": ques})
        resp = get_response(qwen_messages).output.choices[0]["message"]["content"]
        qwen_messages.append({"role": "assistant", "content": resp})
    elif model == "spark":
        spark_messages.append(ChatMessage(role="user", content=ques))
        resp = spark.generate([spark_messages]).generations[0][0].text
        # BUG FIX: the original appended the literal string "resp" instead of
        # the model's answer, silently corrupting the Spark history.
        spark_messages.append(ChatMessage(role="assistant", content=resp))
    elif model == "doubao":
        doubao_messages.append({"role": "user", "content": ques})
        resp = (
            doubao_client.chat.completions.create(
                model=VOLC_ENDPOINT,
                messages=doubao_messages,
            )
            .choices[0]
            .message.content
        )
        doubao_messages.append({"role": "assistant", "content": resp})
    app.logger.info('rsp is %s ', resp)
    # Strip any chain-of-thought block: collapse everything between
    # "###Thinking" and "###Response" down to the response marker alone.
    pattern = r"###Thinking(.*)###Response"
    resp = re.sub(pattern=pattern, repl="###Response", string=resp, flags=re.DOTALL)

    html = mdtex2html.convert(
        resp,
        extensions=[
            'abbr', 'def_list', 'nl2br', 'attr_list', 'fenced_code',
            'sane_lists', 'smarty', 'footnotes', 'meta', 'toc',
        ],
    )
    return jsonify({"code": 200, "msg": "success", "data": html, "markdown": resp})

@app.route("/api/chat_clear", methods=["GET", "POST"])
def chat_clear():
    """Clear the conversation history of one model, or of all models.

    Expects a JSON body with key "op": either "clear_all" or
    "clear_<model>" where <model> is one of yiyan/qwen/spark/doubao/glm.
    Unknown ops are ignored and still answered with success, matching the
    original behavior.
    """
    if not request.data:
        app.logger.info('no data')
    data = request.data.decode("utf-8")

    data_json = json.loads(data)
    op = data_json["op"]
    if op == "clear_all":
        init_messages_all()
    elif op in ("clear_yiyan", "clear_qwen", "clear_spark", "clear_doubao", "clear_glm"):
        # BUG FIX: the original passed the misspelled name "duobao" for
        # "clear_doubao"; init_messages silently ignores unknown names, so
        # the doubao history was never actually cleared.
        init_messages(op[len("clear_"):])
    return jsonify({"code": 200, "msg": "success", "data": op + "_success"})


if __name__ == "__main__":
    # Serve via waitress.  A single worker thread keeps the module-level
    # chat histories free from concurrent mutation.
    serve(app, host='0.0.0.0', port=9092, threads=1)
