from langchain.agents import initialize_agent, AgentType
from langchain.chains import RetrievalQA, APIChain
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_community.chat_models import ChatTongyi, ChatOpenAI, ChatOllama
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores.qdrant import Qdrant
from langchain_core.messages import HumanMessage, AIMessage
from selftool import mytools
from db_opeartor import client


def handle_message(chat_history: list, model_name: str, chat_mode: str, knowledge_base: str) -> str:
    """Route the latest user question to an LLM according to ``chat_mode``.

    Args:
        chat_history: List of ``(human, ai)`` message pairs; the last pair's
            human message is the question to answer. (Annotated ``str`` before,
            which was wrong — the code indexes it as a sequence of pairs.)
        model_name: Model identifier; any name containing ``'qwen'`` selects
            ChatTongyi, otherwise a local ChatOllama instance is used.
        chat_mode: One of "大模型" (plain LLM chat), "大模型+知识库"
            (retrieval-augmented QA over a Qdrant collection), or anything
            else (tool-using conversational agent with memory).
        knowledge_base: Qdrant collection name; only used in the
            "大模型+知识库" mode.

    Returns:
        The model's answer as a string.
    """
    question = chat_history[-1][0]

    # Select the backing chat model from the name.
    if 'qwen' in model_name:
        llm = ChatTongyi(model_name=model_name)
    else:
        # NOTE(review): base_url/model are hard-coded; consider making these
        # configurable if more local models are needed.
        llm = ChatOllama(base_url="http://172.20.18.158:7000", model="llama2-chinese")

    if chat_mode == "大模型":
        # Plain chat: replay prior turns as alternating Human/AI messages,
        # then append the new question.
        history_langchain_format = []
        for human, ai in chat_history[:-1]:
            history_langchain_format.append(HumanMessage(content=human))
            history_langchain_format.append(AIMessage(content=ai))
        history_langchain_format.append(HumanMessage(content=question))
        return llm(history_langchain_format).content
    elif chat_mode == "大模型+知识库":
        # Retrieval-augmented QA over the selected Qdrant collection.
        db = Qdrant(
            client=client, collection_name=knowledge_base,
            embeddings=DashScopeEmbeddings()
        )
        qa_chain = RetrievalQA.from_chain_type(llm, retriever=db.as_retriever())
        return qa_chain.run(query=question)
    else:
        # Tool-using agent: rebuild conversation memory from prior turns.
        memory = ConversationBufferMemory(memory_key="chat_history")
        memory_history = ChatMessageHistory()
        for human, ai in chat_history[:-1]:
            memory_history.messages.append(HumanMessage(content=human))
            memory_history.messages.append(AIMessage(content=ai))
        memory.chat_memory = memory_history
        agent = initialize_agent(
            mytools,
            llm,
            agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
            verbose=True,
            memory=memory,
            handle_parsing_errors=True)
        return agent(question)["output"]

