import json
import os
import shutil
import subprocess
import threading
import time

import gradio as gr
import requests
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.schema import (
    HumanMessage, AIMessage, SystemMessage
)
from langchain_chroma import Chroma
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import LanguageParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import Language
from langchain_text_splitters import RecursiveCharacterTextSplitter


# Launch the backing OpenAI-compatible API server as a child process.
# Its stdout/stderr are inherited, so server logs show up in this console.
webui_command = ["python", "api_server.py"]
webui_process = subprocess.Popen(
    webui_command, text=True)

print("开始启动 ui")
# Access token is required to clone the docs repository; fail fast if absent.
GITEE_ACCESS_TOKEN = os.environ.get("GITEE_ACCESS_TOKEN", "")
if not GITEE_ACCESS_TOKEN:
    print("GITEE_ACCESS_TOKEN 环境变量不存在")
    raise ValueError("环境变量 GITEE_ACCESS_TOKEN 不存在！")

# Local OpenAI-compatible endpoint served by api_server.py started above.
base_url = "http://127.0.0.1:8000/v1/"
repo_path = "/gitee-ai-docs"
# NOTE(review): the token is embedded in the clone URL, so it can leak via
# process listings or git error output — consider a credential helper.
git_clone_command = ["git", "clone",
                     f"https://oauth2:{GITEE_ACCESS_TOKEN}@gitee.com/gitee-ai/docs.git", repo_path]


def clone_doc_repo():
    """Clone the Gitee AI docs repository into ``repo_path``.

    Uses the module-level ``git_clone_command``. Failures are reported to
    stdout but never raised, so callers must tolerate a missing/partial
    checkout afterwards.
    """
    try:
        git_process = subprocess.run(
            git_clone_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        print("stdout:", git_process.stdout)
        print("stderr:", git_process.stderr)
        # subprocess.run does not raise on a non-zero exit status unless
        # check=True, so a failed clone would otherwise pass silently.
        if git_process.returncode != 0:
            print("克隆仓库发生错误:", "git exit code", git_process.returncode)
    except Exception as e:
        # Covers launch errors (e.g. git binary missing).
        print("克隆仓库发生错误:", e)


print("等待 uvicorn 启动完成: ")

# Global Chroma vector store, built lazily by update_doc_db().
# None (rather than "") is the idiomatic "not built yet" sentinel; all
# consumers only test it with `if not db:`, so behavior is unchanged.
db = None


def update_doc_db():
    """Rebuild the global Chroma vector store from a fresh clone of the docs repo.

    Removes any previous checkout, re-clones the repository, splits the
    Markdown/text/JSON files into overlapping chunks and embeds them via the
    bge-m3 model served at ``base_url``.

    Returns:
        The populated Chroma instance (also assigned to the global ``db``).
    """
    global db
    # Start from a clean checkout; shutil.rmtree is portable and does not
    # depend on an external `rm` binary. ignore_errors covers the first run
    # where repo_path does not exist yet.
    shutil.rmtree(repo_path, ignore_errors=True)
    clone_doc_repo()
    loader = GenericLoader.from_filesystem(
        repo_path,
        glob="**/*",
        suffixes=[".md", ".txt", ".json"],
        exclude=["**/non-utf8-encoding.py"],
        # No explicit language=...: LanguageParser infers it from the suffix.
        parser=LanguageParser(parser_threshold=0),
    )
    documents = loader.load()
    markdown_splitter = RecursiveCharacterTextSplitter.from_language(
        # Small chunks keep each embedding batch under the backend limit
        # (previously observed: "Batch size 51030 exceeds maximum 41666").
        language=Language.MARKDOWN, chunk_size=500, chunk_overlap=200
    )
    docs = markdown_splitter.split_documents(documents)
    print("docs 分块长度", len(docs))
    # Non-OpenAI embedding backend, so tiktoken_enabled must be False.
    db = Chroma.from_documents(
        docs,
        OpenAIEmbeddings(disallowed_special=(), api_key="EMPTY",
                         base_url=base_url, model="hf-models/bge-m3",
                         timeout=300, tiktoken_enabled=False,
                         show_progress_bar=True, chunk_size=8192),
        persist_directory="./chroma_db",
    )
    return db


# print(documents[100:500])

# update_doc_db()

# chunk_size 块字符长度, 召回的每一项中字符串的长度, 代码文件需要比较大才能召回文件内容完整。300 行代码可能有六千字符
#  chunk_overlap (块重叠) 指相邻块之间的重叠部分，确保在块分块后， 块之间的内容不会被忽略

# def read_process_output(process):
#     """Reads the process output and prints it."""
#     while True:
#         output = process.stdout.readline()
#         if output == '' and process.poll() is not None:
#             break
#         if output:
#             print("API Server: "+output.strip())


# prompt = ChatPromptTemplate.from_messages(
#     [
#         (
#             "system", "你是一个 Gitee AI 文档助手，参考上下文中的文档内容，回答用户问题。你的名字叫马建仓。你幽默风趣，遵守中国法律，不回复任何敏感、违法、违反道德的问题。:\n\n{context}",
#         ),
#         ("placeholder", "{chat_history}"),
#         ("user", "{input}"),
#     ]
# )


# retriever_chain = create_history_aware_retriever(llm, retriever, prompt)
# document_chain = create_stuff_documents_chain(llm, prompt)

# qa = create_retrieval_chain(retriever_chain, document_chain)

# question = """
# Grimoire Gitee AI 快速开始
# """
# result = qa.invoke({"input": question})
# print(result["answer"])


# for chunk in qa.stream({"input": question}):
#     if (chunk.get("context")):
#         print("召回内容", chunk.get("context"))
#     if answer_chunk := chunk.get("answer"):
#         print(f"{answer_chunk}", end="")


# system_prompt = {
#     "role": "system",
#     "content":
#     "你是一个 Gitee AI 文档助手，参考上下文中文档内容，回答用户问题。你的名字叫马建仓。你幽默风趣，遵守中国法律，不回复任何敏感、违法、违反道德的问题。"
# }


def chat_fn(message, history):
    """Gradio ChatInterface handler: retrieval-augmented answer, streamed.

    Args:
        message: The user's current question; must be non-empty.
        history: List of (user, assistant) message pairs kept by Gradio.

    Yields:
        The accumulated answer text so far (Gradio expects the full text
        on every yield, not just the delta).

    Raises:
        gr.Error: If the message is empty.
    """
    global db
    if not message:
        raise gr.Error("请输入您的问题！")
    llm = ChatOpenAI(model="gitee", api_key="EMPTY", base_url=base_url,
                     callbacks=[StreamingStdOutCallbackHandler()],
                     streaming=True, temperature=0.3, presence_penalty=1.2, top_p=0.9)
    print("input:", message)
    if not db:
        # BUG FIX: gr.Error is an exception and only has an effect when
        # raised — the original bare call did nothing. Use gr.Info to show
        # a toast while the vector store is being (re)built.
        gr.Info("正在更新向量数据库，请稍后，过程大约持续五分钟")
        db = update_doc_db()
    retriever = db.as_retriever(
        # search_type is one of 'similarity', 'similarity_score_threshold', 'mmr'
        search_type="mmr",
        search_kwargs={"k": 10}
    )
    history_openai_format = []
    search_res = str(retriever.invoke(message))
    print("搜索结果:", search_res)
    # Retrieved context is injected into the system prompt.
    history_openai_format.append(SystemMessage(
        content=f"上下文的文档内容为: {search_res}。你是一个 Gitee AI 文档助手，不废话，参考上下文内容，只需要回答用户提到的问题，不需要全部总结。你的名字叫马建仓。你幽默风趣，遵守中国法律，不回复任何敏感、违法、违反道德的问题。"))
    for human, ai in history:
        history_openai_format.append(HumanMessage(content=human))
        history_openai_format.append(AIMessage(content=ai))
    history_openai_format.append(HumanMessage(message))
    full_answer = ""
    for response in llm.stream(history_openai_format):
        full_answer += response.content
        yield full_answer


# Shared chat widget rendered inside the ChatInterface below.
chatbot = gr.Chatbot(height=450, label="Gitee 马建仓")


def toast_info_update_message():
    """Show a toast confirming that a doc-DB rebuild was requested."""
    gr.Info("已请求更新")


with gr.Blocks(theme=gr.themes.Soft(), fill_height=True) as demo:
    # BUG FIX: both HTML snippets had unclosed <div> tags ("<div><div>").
    gr.HTML("<div></div>")  # spacer
    gr.HTML("<div>哈喽！我是马建仓，你在使用 Gitee AI 过程中有任何问题都可以找我！</div>")
    chat = gr.ChatInterface(chat_fn,
                            submit_btn="提交",
                            chatbot=chatbot,
                            clear_btn="清空",
                            stop_btn="暂停",
                            undo_btn=None,
                            retry_btn="重试",
                            examples=["Gitee AI 是什么?",
                                      "应用能干嘛？", "如何创建应用？", "介绍模型引擎", "一个应用会有哪些状态，分别是什么意思？"],
                            )
    update_button = gr.Button("点击更新文档向量库", elem_id="update_button")

    def on_button_click():
        """Rebuild the doc vector store, then confirm with a toast."""
        update_doc_db()
        toast_info_update_message()

    update_button.click(fn=on_button_click)
    # BUG FIX: CSS comments must use /* */ — '#' is not a CSS comment
    # delimiter and produced an invalid declaration.
    demo.css = """
    #update_button {
        width: 200px;  /* button width */
    }
    """
demo.queue()
demo.launch(show_api=False)
