import nltk
import os

from langchain import ConversationChain, PromptTemplate
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
import sentence_transformers
from ChatGLM import ChatGLM
from langchain.chains import RetrievalQAWithSourcesChain, ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory, VectorStoreRetrieverMemory


class prompt_Structure:
    """Retrieval-augmented conversation builder.

    Maintains an in-memory Chroma vector store (Chinese sentence
    embeddings) and produces a ``ConversationChain`` whose memory is
    backed by similarity search over that store, so prior exchanges and
    indexed documents can be surfaced as "previous conversation" context.
    """

    def __init__(self):
        # Splitter used to chunk loaded documents before indexing.
        self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
        # Chinese sentence-embedding model for vector similarity search.
        self.embedding = HuggingFaceEmbeddings(model_name='shibing624/text2vec-base-chinese')
        # In-memory Chroma store backed by the embedding function above.
        self.docsearch = Chroma(embedding_function=self.embedding)

    def get_chain(self, llm):
        """Build a ``ConversationChain`` around *llm*.

        The chain's memory is a ``VectorStoreRetrieverMemory`` over this
        instance's vector store, configured for maximal-marginal-relevance
        retrieval (cosine distance, fetch_k=100) returning the single best
        match (k=1), which is injected into the prompt as ``{history}``.

        :param llm: any LangChain-compatible language model.
        :return: a configured, verbose ``ConversationChain``.
        """
        retriever = self.docsearch.as_retriever()
        # MMR over cosine distance: consider 100 candidates, keep the 1 best.
        retriever.search_kwargs.update(
            distance_metric='cos',
            fetch_k=100,
            maximal_marginal_relevance=True,
            k=1,
        )

        memory = VectorStoreRetrieverMemory(retriever=retriever)

        # Runtime prompt text — kept byte-for-byte; {history} is filled by
        # the retriever memory, {input} by the caller's question.
        template = """你是一个在和人类对话的大数据科学咨询机器人.
                之前的对话：
                {history}
                （尽可能使用上述信息作为答案，如果之前对话于当前问题不相关，则不需要使用这些信息）
                当前对话：
                ##问题##{input}
                ##回答##"""
        prompt = PromptTemplate(input_variables=["history", "input"], template=template)
        return ConversationChain(
            llm=llm,
            prompt=prompt,
            memory=memory,
            verbose=True,
        )

    def load_new(self, directory):
        """Load every document under *directory*, chunk it, and index it.

        :param directory: path passed to ``DirectoryLoader``; all readable
            files beneath it are loaded, split into 500-character chunks,
            and added to the vector store.
        """
        documents = DirectoryLoader(directory).load()
        chunks = self.text_splitter.split_documents(documents)
        self.docsearch.add_documents(chunks)
