import os
from langchain.schema import (
    HumanMessage,
    SystemMessage
)
from langchain_openai import ChatOpenAI
import yaml
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import Docx2txtLoader
from langchain_community.document_loaders import TextLoader
from langchain.memory import ConversationSummaryMemory
from langchain.prompts import (
    ChatPromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai.embeddings import OpenAIEmbeddings

from langchain_community.vectorstores import Qdrant
from langchain.chains import ConversationalRetrievalChain
# Load runtime configuration (API key and endpoint URLs).
# safe_load constructs only plain Python objects — FullLoader is broader
# than a config file needs and is discouraged for anything untrusted.
with open('config.yaml', encoding='utf-8') as file:
    data = yaml.safe_load(file)

# Shared chat model: deterministic output (temperature=0), replies capped
# at 2000 tokens, routed through the configured proxy base URL.
chatAI = ChatOpenAI(
    model="gpt-3.5-turbo",
    temperature=0,
    max_tokens=2000,
    api_key=data['openai_api_key_github_website'],
    base_url=data['openai_github_website_base_url'],
)

class CommandlineChatbot:
    """Command-line RAG chatbot over a folder of local documents.

    Loads every supported file (PDF, .docx, .txt) in the given folder,
    splits the text into small chunks, indexes them in an in-memory Qdrant
    collection, and answers questions via a ConversationalRetrievalChain
    whose chat history is kept as a running summary.
    """

    # Map lowercase file suffix -> document loader class.
    # NOTE(review): Docx2txtLoader understands only the .docx (zip/XML)
    # format; the original code also matched legacy binary .doc files,
    # which crashed at load time, so .doc is intentionally not mapped.
    _LOADERS = {
        '.pdf': PyPDFLoader,
        '.docx': Docx2txtLoader,
        '.txt': TextLoader,
    }

    def __init__(self, dir):
        """Build the vector index and the conversational retrieval chain.

        Args:
            dir: Path of the folder whose documents are indexed.
                (Parameter name kept for backward compatibility even
                though it shadows the ``dir`` builtin.)
        """
        documents = []
        for name in os.listdir(dir):
            # Case-insensitive suffix match; unsupported files are skipped.
            suffix = os.path.splitext(name)[1].lower()
            loader_cls = self._LOADERS.get(suffix)
            if loader_cls is None:
                continue
            documents.extend(loader_cls(os.path.join(dir, name)).load())

        # Small chunks (200 chars, no overlap) keep retrieval granular.
        splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0)
        all_splits = splitter.split_documents(documents)
        print(len(all_splits))

        # Ephemeral in-memory Qdrant collection; rebuilt on every run.
        self.vectorstore = Qdrant.from_documents(
            documents=all_splits,
            embedding=OpenAIEmbeddings(
                openai_api_base=data['openai_github_website_base_url_embedding'],
                openai_api_key=data['openai_api_key_github_website'],
            ),
            location=":memory:",
            collection_name="my_documents",
        )

        # Reuse the module-level shared chat model.
        self.chatAI = chatAI

        # Running summary of the conversation, exposed to the chain under
        # its expected "chat_history" key.
        self.memory = ConversationSummaryMemory(
            llm=self.chatAI,
            memory_key="chat_history",
            return_messages=True,
        )

        self.qa = ConversationalRetrievalChain.from_llm(
            self.chatAI,
            retriever=self.vectorstore.as_retriever(),
            memory=self.memory,
        )

    def chat_loop(self):
        """Interactive REPL: read a question, print the answer; 'exit' quits."""
        print("Chatbot 已启动! 输入'exit'来退出程序。")
        while True:
            user_input = input("你: ")
            if user_input.lower() == 'exit':
                print("再见!")
                break
            # Pass the chain's input key explicitly; the memory object
            # supplies "chat_history" on its own.
            response = self.qa.invoke({"question": user_input})
            print(f"Chatbot: {response['answer']}")

if __name__ == "__main__":
    # Index the documents in the OneFlower folder and start the REPL.
    docs_folder = "OneFlower"
    CommandlineChatbot(docs_folder).chat_loop()