Dhahlan2000 committed · Commit a10faf8 · Parent(s): a889b71

Refactor LLM integration in app.py: replace RunnableSequence with LLMChain and update method call from invoke() to run() for improved functionality and clarity
app.py CHANGED
@@ -8,7 +8,7 @@ from langchain.schema import AgentAction, AgentFinish, HumanMessage
 from langchain.prompts import BaseChatPromptTemplate
 from langchain.tools import Tool
 from langchain_huggingface import HuggingFacePipeline
-from langchain import
+from langchain import LLMChain
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 from langchain.memory import ConversationBufferWindowMemory
 import torch
@@ -154,8 +154,8 @@ output_parser = CustomOutputParser()
 # Initialize HuggingFace pipeline
 llm = HuggingFacePipeline(pipeline=pipe)
 
-#
-llm_chain =
+# LLM chain
+llm_chain = LLMChain(llm=llm, prompt=prompt_with_history)
 tool_names = [tool.name for tool in tools]
 agent = LLMSingleActionAgent(
     llm_chain=llm_chain,
@@ -178,7 +178,7 @@ if st.button("Submit"):
     with st.spinner("Processing..."):
         try:
             # Run the agent and get the response
-            response = agent_executor.
+            response = agent_executor.run(query)  # Correct method is `run()`
             st.success("Response:")
             st.write(response)
         except Exception as e:
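
For readers skimming the diff, here is a rough sketch of how the pieces this commit touches fit together in app.py. It is not the full file: prompt_with_history, tools, output_parser, pipe, and query are defined elsewhere in the app, and the LLMSingleActionAgent/AgentExecutor arguments beyond llm_chain= (stop sequence, verbose flag, executor wiring) are assumptions taken from the standard LangChain custom-agent pattern, not lines from this commit.

    from langchain import LLMChain
    from langchain.agents import LLMSingleActionAgent, AgentExecutor
    from langchain_huggingface import HuggingFacePipeline

    # Wrap the transformers pipeline so LangChain can drive it as an LLM.
    llm = HuggingFacePipeline(pipeline=pipe)

    # Classic prompt + LLM chain; this is what replaces the RunnableSequence.
    llm_chain = LLMChain(llm=llm, prompt=prompt_with_history)

    tool_names = [tool.name for tool in tools]
    agent = LLMSingleActionAgent(
        llm_chain=llm_chain,
        output_parser=output_parser,
        stop=["\nObservation:"],   # assumed stop token, as in LangChain's custom-agent example
        allowed_tools=tool_names,
    )
    agent_executor = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, verbose=True  # executor wiring assumed; not shown in this diff
    )

    # Chain.run() hands back the final answer as a plain string,
    # while invoke() returns a dict keyed by output name.
    response = agent_executor.run(query)

The practical difference the commit message points at: run() returns the final answer string directly, which is what st.write(response) expects, whereas invoke() returns a mapping that would need an extra key lookup before display.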