import json
from langchain_core.messages import HumanMessage, AIMessage, BaseMessage
from langchain_core.runnables import RunnableBranch, RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
from langchain_community.vectorstores import Chroma
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import MarkdownHeaderTextSplitter
from pydantic import BaseModel, Field

from create_llm import create_llm, create_embeddings, chain_log

# Markdown source documents that get split, embedded and indexed at startup.
# Add/uncomment paths here to extend the knowledge base.
file_path_list = [
  '../data/md_list/百炼_图像局部重绘.md',
  # '../data/md_list/百炼_涂鸦作画.md',
]


class RetrieveInfo(BaseModel):
  """Schema for the routing decision emitted by the retrieval-check LLM.

  Field names match the JSON keys the prompt instructs the model to emit
  (``retrieve_flag`` / ``code_flag``) and the keys the RunnableBranch
  conditions read downstream — the original camelCase names
  (``retrieveFlag`` / ``codeFlag``) matched neither.
  """
  # "Y" when the question shares nouns with the indexed documents
  # (retrieval should run), otherwise "N".
  retrieve_flag: str = Field(description="是否需要检索文档，值为Y或N")
  # "Y" when the question is code-related (routes to MMR search), otherwise "N".
  code_flag: str = Field(description="是否为代码类的问题，值为Y或N")


def load_and_process_document():
  """Load every markdown file in ``file_path_list`` and split it by headers.

  Returns a flat list of chunk documents produced by
  MarkdownHeaderTextSplitter; heading lines are kept inside each chunk
  (``strip_headers=False``).
  """
  # The splitter is loop-invariant, so build it once up front.
  header_splitter = MarkdownHeaderTextSplitter(
    headers_to_split_on=[
      ("#", "一级标题"),
      ("##", "二级标题"),
      ("###", "三级标题"),
    ],
    strip_headers=False,  # keep the heading text inside the split chunks
  )

  chunks = []
  for path in file_path_list:
    # TextLoader.load() returns a one-element list for a single file.
    raw_docs = TextLoader(file_path=path, encoding="utf-8").load()
    chunks.extend(header_splitter.split_text(raw_docs[0].page_content))

  return chunks

# Build an in-memory Chroma vector store from the document chunks to embed.
def create_vector_store(documents):
  """Embed *documents* with the local embedding model and index them in Chroma."""
  return Chroma.from_documents(
    documents=documents,
    embedding=create_embeddings("local_embedding"),
  )

# Factory for a trio of closures that manage one shared chat history.
def create_history_manage():
  """Return ``(save_message, build_message, print_message)`` closures.

  All three operate on the same private list, so messages saved through
  ``save_message`` are visible via ``build_message``/``print_message``.
  """
  history = []

  def save_message(msg):
    # Only LangChain message objects may enter the history.
    if not isinstance(msg, BaseMessage):
      raise TypeError("Message must be type BaseMessage")
    history.append(msg)

  def build_message():
    # Hand back the live list (callers see future appends too).
    return history

  def print_message():
    # Debug helper: dump the whole transcript to stdout.
    print('\n print message start', '-' * 20)
    for item in history:
      print(f"{item.type}: {item.content}")
    print('\n print message end', '-' * 20)

  return save_message, build_message, print_message

# Query the vector store with the question and return the joined chunk text.
def retrieve_from_vector(question: str, isCode: bool):
  """Search the module-level ``vectordb`` for chunks relevant to *question*.

  Code questions use max-marginal-relevance search (favors diverse chunks);
  everything else uses plain similarity search. Returns the top-3 chunk
  texts joined by blank lines, or ``None`` when nothing was found.
  """
  if isCode:
    docs = vectordb.max_marginal_relevance_search(question, k=3)
  else:
    docs = vectordb.similarity_search(question, k=3)

  # Empty result -> signal "no reference material" to the caller.
  if not docs:
    return None

  return "\n\n".join(doc.page_content for doc in docs)

def inject_retrieve_content(question: str, isCode: bool):
  """Wrap *question* in a prompt that includes retrieved reference material.

  Bug fix: when retrieval finds nothing, fall back to the bare question —
  the original interpolated the literal string "None" into the prompt,
  feeding the model a bogus "参考内容" section.
  """
  retrieve_content = retrieve_from_vector(question, isCode)
  if retrieve_content is None:
    return question
  return f"""
    #任务
    你需要参考以下内容来回答用户的问题
    #参考内容
    {retrieve_content}
    #问题
    {question}
  """

# Closures sharing one in-memory chat history (see create_history_manage).
save_message, build_message, print_message = create_history_manage()

# Load and header-split the markdown files once at startup.
documents = load_and_process_document()

# In-memory Chroma index over the document chunks, queried by retrieve_from_vector.
vectordb = create_vector_store(documents)

# /*------------------------- chain: decide whether retrieval is needed -------------------------*/
# Asks the LLM (temperature 0 for deterministic routing) to emit a JSON object
# with retrieve_flag / code_flag ("Y"/"N"); JsonOutputParser turns the raw text
# into a plain dict that the RunnableBranch below inspects.
# chain_log() comes from create_llm — presumably a pass-through logging step;
# TODO(review): confirm against its definition.
should_retrieve_chain = ChatPromptTemplate.from_template("""
# 任务
请判断用户的问题是否与以下文档主题存在相同的名词
返回的结果必须是一个json对象，对象中有retrieve_flag和code_flag两个属性，值都是Y或者N；
如果存在相同的名词时，返回的retrieve_flag为Y，否则为N；
存在相同的名词时，如果用户的问题为代码相关的问题，输出的结果code_flag为Y，否则为N；
如果不存在相同的名词，则输出的retrieve_flag为N，并且code_flag也为N；

# 文档列表
{file_list}

# 用户问题
{question}
""") | create_llm(temperature=0) | JsonOutputParser(pydantic_object=RetrieveInfo) | chain_log()

# Routing chain: attach the retrieve_info dict, branch on it to build
# user_content, then render the full chat prompt (system + history + user turn).
prompt_chain = (RunnablePassthrough.assign(retrieve_info = should_retrieve_chain)
  | RunnableBranch(
    # Retrieval branch: wrap the question together with retrieved context.
    # Robustness fix: .get() instead of [] — if the LLM's JSON omits a key,
    # the condition is simply falsy and we fall through to the default
    # branch, rather than crashing the whole chain with a KeyError.
    (lambda x: x["retrieve_info"].get("retrieve_flag") == "Y",
     RunnablePassthrough.assign(user_content=lambda x: inject_retrieve_content(x["question"], x["retrieve_info"].get("code_flag") == "Y"))),
    # Default branch: pass the raw question through unchanged.
    RunnablePassthrough.assign(user_content=lambda x: x["question"])
  )
  | chain_log()
  | ChatPromptTemplate.from_messages([
    ('system', "你是一个智能助手，你需要帮助用户回答问题"),
    MessagesPlaceholder(variable_name="chat_history"),
    HumanMessagePromptTemplate.from_template("{user_content}")
  ])

)
# Full pipeline: inject the chat history and document list, build the prompt
# via prompt_chain, call the LLM, and decode the response to a plain string.
chain = RunnablePassthrough.assign(
  chat_history = lambda x: build_message(),  # shared in-memory transcript
  file_list = lambda x: "\n".join(file_path_list)  # document list for the router prompt
) | prompt_chain | create_llm(temperature=0) | StrOutputParser()


def send(question: str):
  """Stream the model's answer for *question*, echoing chunks to stdout.

  On completion the question and the full answer are appended to the shared
  chat history; the complete answer text is returned.
  """
  pieces = []
  for piece in chain.stream({"question": question}):
    pieces.append(piece)
    print(piece, end="", flush=True)
  answer = "".join(pieces)

  save_message(HumanMessage(question))
  save_message(AIMessage(answer))
  return answer


# Simple REPL: answer questions until the user types "exit".
# Robustness fix: the original crashed with a traceback on Ctrl-D (EOFError)
# or Ctrl-C (KeyboardInterrupt); both now end the session cleanly.
while True:
  try:
    question = input("\n 请输入您的问题：")
  except (EOFError, KeyboardInterrupt):
    break
  if question == "exit":
    break
  send(question)


