Waseem771 committed
Commit
a3187fe
1 Parent(s): 260ccf2

Update app.py

Files changed (1)
  1. app.py +13 -5
app.py CHANGED
@@ -9,6 +9,7 @@ from dotenv import load_dotenv
 # Load environment variables
 load_dotenv()
 
+# Set environment variables
 os.environ["LANGCHAIN_TRACING_V2"] = "true"
 os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")
 
@@ -21,15 +22,22 @@ prompt = ChatPromptTemplate.from_messages(
 )
 
 # Streamlit app
-st.title('Langchain Demo With LLAMA2 API')
+st.title('Langchain Demo With Ollama Llama2 API')
 input_text = st.text_input("Search the topic you want")
 
-# Ollama LLama2 LLM
-llm = Ollama(model="llama2")
+# Ollama LLama2 LLM with remote or local settings
+# Ensure the model points to a valid URL if not running locally
+llm = Ollama(
+    model="llama2",
+    server_url=os.getenv("OLLAMA_SERVER_URL", "http://localhost:11434")  # Add server URL to env
+)
 output_parser = StrOutputParser()
 chain = prompt | llm | output_parser
 
 # Display result when user inputs text
 if input_text:
-    response = chain.invoke({"question": input_text})
-    st.write(response)
+    try:
+        response = chain.invoke({"question": input_text})
+        st.write(response)
+    except Exception as e:
+        st.error(f"Error: {e}")
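
For context, a minimal sketch of what the full app.py plausibly looks like after this commit. The import paths and the prompt template wording are assumptions, since the diff only shows the changed hunks; note also that the Ollama wrapper in langchain_community exposes the server address as base_url, so the sketch passes the commit's OLLAMA_SERVER_URL value through that parameter instead of server_url.

# Hypothetical reconstruction of app.py after commit a3187fe; imports and
# prompt wording are assumptions, only the changed hunks appear in the diff.
import os

import streamlit as st
from dotenv import load_dotenv
from langchain_community.llms import Ollama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# Load environment variables
load_dotenv()

# Set environment variables
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")

# Prompt template (assumed wording; only the closing ")" is visible in the diff)
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant. Respond to the user's question."),
        ("user", "Question: {question}"),
    ]
)

# Streamlit app
st.title('Langchain Demo With Ollama Llama2 API')
input_text = st.text_input("Search the topic you want")

# Ollama Llama2 LLM with remote or local settings.
# langchain_community's Ollama takes the server address as base_url,
# so the OLLAMA_SERVER_URL value from the commit is passed here.
llm = Ollama(
    model="llama2",
    base_url=os.getenv("OLLAMA_SERVER_URL", "http://localhost:11434"),
)

output_parser = StrOutputParser()
chain = prompt | llm | output_parser

# Display result when user inputs text
if input_text:
    try:
        response = chain.invoke({"question": input_text})
        st.write(response)
    except Exception as e:
        st.error(f"Error: {e}")

With this change the app can target a remote Ollama server by setting OLLAMA_SERVER_URL in the Space's environment (or in .env alongside LANGCHAIN_API_KEY); if the variable is unset, it falls back to the local default http://localhost:11434.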