vinhnx90 committed
Commit
5435ca6
1 Parent(s): 384e373

Update key config

Files changed (4):
  1. README.md +1 -1
  2. apikey.py +3 -5
  3. app.py +8 -1
  4. requirements.txt +3 -1
README.md CHANGED
@@ -83,7 +83,7 @@ touch .env
 Inside `.env` file, pass the API Key into `OPENAI_API_KEY` value
 
 ```sh
-OPENAI_API_KEY = "sk-..."
+OPENAI_API_KEY={YOUR_API_KEY_HERE}
 ```
 
 5. **Run the Streamlit App**:
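
The new `.env` entry drops the quotes and the spaces around `=`; python-dotenv parses both forms, so the change is mainly about matching the unquoted style used elsewhere. A minimal sketch (not part of the commit) for checking that the value in `.env` is actually picked up:

```python
# Quick check that the .env entry is visible to the app.
# Assumes python-dotenv is installed and .env sits in the working directory.
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env and populates os.environ
print("OPENAI_API_KEY is set:", os.environ.get("OPENAI_API_KEY") is not None)
```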
apikey.py CHANGED
@@ -1,8 +1,6 @@
 import os
-import openai
+from dotenv import load_dotenv
 
-from dotenv import load_dotenv, find_dotenv
+load_dotenv()
 
-_ = load_dotenv(find_dotenv())
-
-openai_api_key = os.environ["OPENAI_API_KEY"]
+llm_api_key = os.environ.get("OPENAI_API_KEY")
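Note the switch from `os.environ[...]` to `os.environ.get(...)`: the old code raised a `KeyError` at import time when the variable was missing, while the new `llm_api_key` is simply `None`. A minimal sketch (the guard and message are illustrative, not from the repo) of failing early in code that consumes `apikey.py`:

```python
# Hypothetical guard for callers of apikey.py: os.environ.get() returns None
# when OPENAI_API_KEY is unset, so raise a clear error up front instead of
# letting ChatOpenAI fail later with a less obvious authentication error.
from apikey import llm_api_key

if not llm_api_key:
    raise RuntimeError("OPENAI_API_KEY is not set; add it to your .env file.")
```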
app.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import streamlit as st
 
+from apikey import llm_api_key
 from langchain.chat_models import ChatOpenAI
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.embeddings.openai import OpenAIEmbeddings
@@ -12,6 +13,8 @@ from langchain.document_loaders import (
     TextLoader,
 )
 
+key = llm_api_key
+
 
 def load_and_process_file(file_data):
     """
@@ -54,7 +57,11 @@ def initialize_chat_model(vector_store):
     Initialize the chat model with the given vector store.
     Returns a ConversationalRetrievalChain instance.
     """
-    llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
+    llm = ChatOpenAI(
+        model="gpt-3.5-turbo",
+        temperature=0,
+        openai_api_key=key,
+    )
     retriever = vector_store.as_retriever()
     return ConversationalRetrievalChain.from_llm(llm, retriever)
 
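With the key now passed explicitly via `ChatOpenAI(openai_api_key=key)`, the model no longer relies on LangChain reading the environment variable itself. A minimal usage sketch of the returned `ConversationalRetrievalChain` (the `vector_store`, question, and history values are placeholders, not from this diff):

```python
# Assumes `vector_store` is the Chroma store built elsewhere in app.py and
# initialize_chat_model() is the function shown in the diff above.
chain = initialize_chat_model(vector_store)

# The legacy ConversationalRetrievalChain takes "question" and "chat_history"
# and returns a dict containing the generated "answer".
result = chain({"question": "What is this document about?", "chat_history": []})
print(result["answer"])
```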
requirements.txt CHANGED
@@ -1,6 +1,8 @@
-streamlit
 langchain
+streamlit
+streamlit_chat
 chromadb
 openai
 tiktoken
 pypdf
+python-dotenv
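
`streamlit_chat` and `python-dotenv` are the genuinely new dependencies; `streamlit` only moved below `langchain`. A minimal sketch of how `streamlit_chat` typically renders a conversation (the message strings are placeholders, and its use elsewhere in app.py is an assumption, not shown in this diff):

```python
# Renders chat bubbles in a Streamlit app; run with `streamlit run <file>.py`.
import streamlit as st
from streamlit_chat import message

st.title("Chat history")
message("What is this document about?", is_user=True, key="user_0")
message("Ask me anything about the uploaded file.", key="assistant_0")
```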