gabruarya committed on
Commit
9f0e461
1 Parent(s): fa56487

Tried Groq

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -10,6 +10,7 @@ from langchain.vectorstores import Pinecone
10
  from langchain.prompts import PromptTemplate
11
  from langchain.chains import RetrievalQA
12
  import streamlit.components.v1 as components
 
13
  import time
14
 
15
  HUGGINGFACEHUB_API_TOKEN = st.secrets['HUGGINGFACEHUB_API_TOKEN']
@@ -35,6 +36,7 @@ def initialize_session_state():
35
  if "conversation" not in st.session_state:
36
  llama = LlamaAPI(st.secrets["LlamaAPI"])
37
  model = ChatLlamaAPI(client=llama)
 
38
 
39
  embeddings = download_hugging_face_embeddings()
40
 
@@ -60,7 +62,7 @@ def initialize_session_state():
60
 
61
  PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
62
  chain_type_kwargs = {"prompt": PROMPT}
63
- retrieval_chain = RetrievalQA.from_chain_type(llm=model,
64
  chain_type="stuff",
65
  retriever=docsearch.as_retriever(
66
  search_kwargs={'k': 2}),
 
10
  from langchain.prompts import PromptTemplate
11
  from langchain.chains import RetrievalQA
12
  import streamlit.components.v1 as components
13
+ from langchain_groq import ChatGroq
14
  import time
15
 
16
  HUGGINGFACEHUB_API_TOKEN = st.secrets['HUGGINGFACEHUB_API_TOKEN']
 
36
  if "conversation" not in st.session_state:
37
  llama = LlamaAPI(st.secrets["LlamaAPI"])
38
  model = ChatLlamaAPI(client=llama)
39
+ chat = ChatGroq(temperature=0, groq_api_key=st.secrets["Groq_api"], model_name="mixtral-8x7b-32768")
40
 
41
  embeddings = download_hugging_face_embeddings()
42
 
 
62
 
63
  PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
64
  chain_type_kwargs = {"prompt": PROMPT}
65
+ retrieval_chain = RetrievalQA.from_chain_type(llm=chat,
66
  chain_type="stuff",
67
  retriever=docsearch.as_retriever(
68
  search_kwargs={'k': 2}),