Jawad138 committed on
Commit
2865e6a
1 Parent(s): 96f51d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -7
app.py CHANGED
@@ -51,13 +51,16 @@ def display_chat_history(chain):
51
 
52
  def create_conversational_chain(vector_store):
53
  load_dotenv()
 
 
 
54
  llm = Replicate(
55
- streaming=True,
56
- model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
57
- replicate_api_token="r8_AA3K1fhDykqLa5M74E5V0w5ss1z0P9S3foWJl",
58
- callbacks=[StreamingStdOutCallbackHandler()],
59
- input={"temperature": 0.01, "max_length": 500, "top_p": 1}
60
- )
61
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
62
 
63
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
@@ -102,4 +105,4 @@ def main():
102
  display_chat_history(chain)
103
 
104
  if __name__ == "__main__":
105
- main()
 
51
 
52
  def create_conversational_chain(vector_store):
53
  load_dotenv()
54
+ replicate_api_token = "r8_AA3K1fhDykqLa5M74E5V0w5ss1z0P9S3foWJl" # Replace with your actual token
55
+ os.environ["REPLICATE_API_TOKEN"] = replicate_api_token
56
+
57
  llm = Replicate(
58
+ streaming=True,
59
+ model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
60
+ callbacks=[StreamingStdOutCallbackHandler()],
61
+ input={"temperature": 0.01, "max_length": 500, "top_p": 1},
62
+ replicate_api_token=replicate_api_token
63
+ )
64
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
65
 
66
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
 
105
  display_chat_history(chain)
106
 
107
  if __name__ == "__main__":
108
+ main()