from typing import List

from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_core.messages import SystemMessage, BaseMessage, AIMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_text_splitters import MarkdownHeaderTextSplitter
from pydantic import BaseModel, Field

from create_llm import create_embeddings, create_llm, runnable_chain_log

# Markdown source documents to index into the vector store.
# Paths are relative to this script's working directory.
file_path_list = [
  "../data/md_list/百炼_图像局部重绘.md",
  "../data/md_list/百炼_涂鸦作画.md"
]

class RetrieveInfo(BaseModel):
  """Structured verdict from the judge LLM: whether to retrieve documents,
  and whether the question is code-related (both flags are "Y" or "N")."""
  retrieve_flag: str = Field(description="是否需要检索文档，值为Y或N")
  code_flag: str = Field(description="是否为代码类的问题，值为Y或N")

retrieve_parser = JsonOutputParser(pydantic_object=RetrieveInfo)

def load_and_process_document():
  """Load every markdown file in ``file_path_list`` and split it by headers.

  Returns:
    list: header-split document chunks from all files; the splitter attaches
    header metadata under the keys "一级标题"/"二级标题"/"三级标题".
  """
  # The splitter is stateless and loop-invariant — build it once, not per file.
  splitter = MarkdownHeaderTextSplitter(
    headers_to_split_on=[
      ("#", "一级标题"),
      ("##", "二级标题"),
      ("###", "三级标题")
    ],
    strip_headers=False  # keep the header line inside each chunk's text
  )
  documents = []
  for file_path in file_path_list:
    loader = TextLoader(file_path=file_path, encoding='utf-8')
    # TextLoader yields exactly one Document per file.
    raw_documents = loader.load()
    documents.extend(splitter.split_text(raw_documents[0].page_content))
  return documents

def create_vector_store(documents):
  """Embed *documents* with the local embedding model and return an
  in-memory Chroma vector store built from them."""
  return Chroma.from_documents(
    documents=documents,
    embedding=create_embeddings("local_embedding"),
  )

def create_history_manager():
  """Create a closure-based chat-history store.

  Returns a ``(save, build, dump)`` triple of functions that all share
  one private message list: save appends a validated message, build
  returns the list, dump pretty-prints it.
  """
  messages: List[BaseMessage] = []

  def save_message(msg):
    # Reject anything that is not a LangChain message object.
    if not isinstance(msg, BaseMessage):
      raise TypeError("Unsupported message type", type(msg))
    messages.append(msg)

  def build_message():
    return messages

  def print_messages():
    print("\nprint messages start", "-" * 20)
    for item in messages:
      print(item.type, "：", item.content)
    print("\nprint messages end", "-" * 20)

  return (save_message, build_message, print_messages)

def retrieve_from_vector(question, isCode=False) -> str | None:
  """Retrieve the top-3 most relevant chunks for *question* from ``vectordb``.

  Code questions use MMR search (more diverse snippets); other questions
  use plain similarity search.

  Returns:
    The chunk texts joined by a large blank-line gap, or None when the
    search produced no documents.
  """
  if isCode:
    # MMR trades a little relevance for diversity — useful for code examples.
    docs = vectordb.max_marginal_relevance_search(query=question, k=3)
  else:
    docs = vectordb.similarity_search(query=question, k=3)

  if not docs:
    return None

  # "\n\n" * 5 keeps a large visual gap between chunks in the final prompt.
  return ("\n\n" * 5).join(doc.page_content for doc in docs)

# Module-level wiring: chat-history closures, document loading, and the vector store.
save_message, build_message, print_messages = create_history_manager()
documents = load_and_process_document()
vectordb = create_vector_store(documents)

# Judge chain: asks the LLM whether the question shares nouns with the indexed
# document topics (retrieve_flag) and whether it is a code question (code_flag);
# the reply is parsed into a RetrieveInfo-shaped dict.
should_retrieve_chain = (
  ChatPromptTemplate.from_template("""
  # 任务
  请判断用户问题是否与以下文档主题存在相同的名词：
  返回结果是一个json对象，对象有 retrieve_flag 以及 code_flag 两个属性，两个属性的值都是Y或者N；
  存在相同名词，输出结果的 retrieve_flag 为Y，否则为N，
  在存在相同名词的基础上，如果问题类型为代码问题，则输出结果的 code_flag 为Y，否则为N
  如果不存在相同名词，则输出结果的 retrieve_flag 以及 code_flag 都是N

  # 文档列表
  {file_list}

  # 问题
  {question}
  """)
  | runnable_chain_log()  # log the rendered judge prompt
  | create_llm(temperature=0)  # temperature 0 for a deterministic verdict
  | retrieve_parser
  | runnable_chain_log())  # log the parsed verdict

def inject_retrieve_content(question: str, isCode=False) -> str:
  """Wrap *question* in a prompt that carries retrieved reference content.

  Falls back to the bare question when retrieval returns nothing, so the
  LLM is never shown the literal string "None" as reference material.
  """
  retrieve_content = retrieve_from_vector(question, isCode)
  if retrieve_content is None:
    return question
  return f"""
  # 任务
  你需要参考如下内容来回答用户问题：
  # 参考内容
  {retrieve_content}
  # 问题
  {question}
  """

# Answer-prompt chain: run the judge, optionally wrap the question with
# retrieved chunks, then render the final chat prompt (system + history +
# user turn).
prompt_chain = (
  RunnablePassthrough.assign(
    retrieve_info=should_retrieve_chain  # {'retrieve_flag': 'Y'/'N', 'code_flag': 'Y'/'N'}
  )
  | runnable_chain_log()
  | runnable_chain_log(lambda x: x['question'])
  # Augment the question only when the judge said retrieve_flag == "Y";
  # code_flag == "Y" switches retrieval to MMR search.
  | RunnablePassthrough.assign(
  user_content=lambda x: inject_retrieve_content(x['question'], x['retrieve_info']['code_flag'] == "Y") if x['retrieve_info']['retrieve_flag'] == "Y" else x['question']
)
  | (ChatPromptTemplate.from_messages([
  SystemMessage(content="""你是一个智能助手，你需要帮助用户解答问题"""),
  MessagesPlaceholder(variable_name="chat_history"),
  HumanMessagePromptTemplate.from_template("{user_content}"),
]))
)

# Full pipeline: attach chat history and the document list to the input,
# build the prompt, call the LLM, and emit plain text.
chain = (
  RunnablePassthrough.assign(
    chat_history=lambda x: build_message(),  # prior turns for the MessagesPlaceholder
    file_list=lambda x: ",".join(file_path_list)  # document list shown to the judge prompt
  ) | prompt_chain
  | create_llm(temperature=0)
  | StrOutputParser()
)

# Interactive REPL: stream each answer to the console, then persist the
# question/answer pair into the chat history.
while True:
  question = input("\n请输入你的问题: ").strip()
  if not question:
    # Don't send blank input to the LLM; just prompt again.
    continue
  if question.lower() == 'exit':
    break

  # Collect chunks in a list and join once — avoids repeated string
  # concatenation while still streaming each token as it arrives.
  chunks = []
  for chunk in chain.stream({"question": question, }):
    chunks.append(chunk)
    print(chunk, end="", flush=True)
  full_text = "".join(chunks)

  save_message(HumanMessage(content=question))
  save_message(AIMessage(content=full_text))

  print_messages()
