kookoobau commited on
Commit
e8f4525
1 Parent(s): 9ebac62
Files changed (1) hide show
  1. app.py +14 -6
app.py CHANGED
@@ -1,8 +1,12 @@
1
  from langchain import HuggingFaceHub, PromptTemplate
2
  from langchain.memory import ConversationBufferMemory
3
- from langchain.chains import ConversationChain
 
4
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
5
  import gradio as gr
 
 
 
6
 
7
  template = """Question: {history}
8
  ------------------
@@ -11,19 +15,23 @@ Answer: Let's think step by step."""
11
  prompt = PromptTemplate(template=template, input_variables=["history"])
12
 
13
  # Create a memory module with a maximum capacity of 1000 items
14
- memory = ConversationBufferMemory()
15
  # Callbacks support token-wise streaming
16
  callbacks = [StreamingStdOutCallbackHandler()]
17
- # Instantiate the LLMChain with the model and tokenizer
18
  llm = HuggingFaceHub(repo_id="gpt2")
19
 
20
- conversation = ConversationChain(llm=llm, memory=memory, callbacks=callbacks, prompt=prompt)
 
 
 
 
 
 
21
 
22
  # Define the Gradio interface
23
  def chatbot_interface(input_text):
24
  response = conversation.predict(input_text)
25
- memory.chat_memory.add_user_message(input_text)
26
- memory.chat_memory.add_ai_message(response)
27
  return response
28
 
29
  # Define the Gradio app
 
1
from langchain import HuggingFaceHub, PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.agents import AgentType, initialize_agent
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import gradio as gr
import os

# SECURITY FIX: the original committed a live Hugging Face API token to source
# control. Never hardcode secrets -- supply the token via the environment
# instead (HuggingFaceHub reads HUGGINGFACEHUB_API_TOKEN from os.environ):
#   export HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxxxxxxxxxx
# The leaked token must be revoked on the Hugging Face account.
if "HUGGINGFACEHUB_API_TOKEN" not in os.environ:
    # Fail early with a clear message rather than deep inside the LLM call.
    raise RuntimeError("Set the HUGGINGFACEHUB_API_TOKEN environment variable.")

# Prompt template kept for reference/compatibility; the conversational agent
# below constructs its own prompt internally and does not consume this one.
template = """Question: {history}
------------------
Answer: Let's think step by step."""

prompt = PromptTemplate(template=template, input_variables=["history"])

# Conversation memory for the agent.
# FIX: ConversationBufferMemory has no `max_capacity` parameter (the original
# passed one, which raises a validation error). The conversational-react agent
# expects its transcript under the `chat_history` memory key.
memory = ConversationBufferMemory(memory_key="chat_history")

# Callbacks support token-wise streaming to stdout.
# NOTE(review): not wired into the agent here; pass them per-call if streaming
# is desired -- confirm against the installed LangChain version's callback API.
callbacks = [StreamingStdOutCallbackHandler()]

# Instantiate the Hugging Face model (token taken from the environment).
llm = HuggingFaceHub(repo_id="gpt2")

# No extra tools for this simple chatbot.
tools = []

# Initialize the conversational agent with memory.
agent_chain = initialize_agent(
    tools,
    llm,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    verbose=True,
    memory=memory,
)

# FIX: `langchain.chains` exports no `AgentChain`, so the original
# `AgentChain(agent_chain, ...)` crashed at import time. The AgentExecutor
# returned by initialize_agent is itself the runnable conversation chain.
conversation = agent_chain
31
 
32
# Define the Gradio interface
def chatbot_interface(input_text):
    """Run one chat turn: send the user's text to the chain and return its reply.

    The chain's attached memory records the exchange itself, so no manual
    add_user_message / add_ai_message bookkeeping is needed here (the
    pre-agent version of this app did that by hand).
    """
    # FIX: LangChain's Chain.predict() accepts keyword arguments only, so the
    # original positional call `conversation.predict(input_text)` raised a
    # TypeError. Chain.run() accepts a single positional input and works for
    # plain chains and agent executors alike.
    response = conversation.run(input_text)
    return response
36
 
37
  # Define the Gradio app