mickkhaw commited on
Commit
4719703
1 Parent(s): a2cc26d

Use chainlit v1.0.0 in requirements.txt to get rid of errors. Complete code in app.py; works locally at localhost:8000.

Browse files
.chainlit/config.toml ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ # Whether to enable telemetry (default: true). No personal data is collected.
3
+ enable_telemetry = true
4
+
5
+ # List of environment variables to be provided by each user to use the app.
6
+ user_env = []
7
+
8
+ # Duration (in seconds) during which the session is saved when the connection is lost
9
+ session_timeout = 3600
10
+
11
+ # Enable third parties caching (e.g. LangChain cache)
12
+ cache = false
13
+
14
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
15
+ # follow_symlink = false
16
+
17
+ [features]
18
+ # Show the prompt playground
19
+ prompt_playground = true
20
+
21
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
22
+ unsafe_allow_html = false
23
+
24
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
25
+ latex = false
26
+
27
+ # Authorize users to upload files with messages
28
+ multi_modal = true
29
+
30
+ # Allows user to use speech to text
31
+ [features.speech_to_text]
32
+ enabled = false
33
+ # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
34
+ # language = "en-US"
35
+
36
+ [UI]
37
+ # Name of the app and chatbot.
38
+ name = "Chatbot"
39
+
40
+ # Show the readme while the thread is empty.
41
+ show_readme_as_default = true
42
+
43
+ # Description of the app and chatbot. This is used for HTML tags.
44
+ # description = ""
45
+
46
+ # Large size content are by default collapsed for a cleaner ui
47
+ default_collapse_content = true
48
+
49
+ # The default value for the expand messages settings.
50
+ default_expand_messages = false
51
+
52
+ # Hide the chain of thought details from the user in the UI.
53
+ hide_cot = false
54
+
55
+ # Link to your github repo. This will add a github button in the UI's header.
56
+ # github = ""
57
+
58
+ # Specify a CSS file that can be used to customize the user interface.
59
+ # The CSS file can be served from the public directory or via an external link.
60
+ # custom_css = "/public/test.css"
61
+
62
+ # Override default MUI light theme. (Check theme.ts)
63
+ [UI.theme.light]
64
+ #background = "#FAFAFA"
65
+ #paper = "#FFFFFF"
66
+
67
+ [UI.theme.light.primary]
68
+ #main = "#F80061"
69
+ #dark = "#980039"
70
+ #light = "#FFE7EB"
71
+
72
+ # Override default MUI dark theme. (Check theme.ts)
73
+ [UI.theme.dark]
74
+ #background = "#FAFAFA"
75
+ #paper = "#FFFFFF"
76
+
77
+ [UI.theme.dark.primary]
78
+ #main = "#F80061"
79
+ #dark = "#980039"
80
+ #light = "#FFE7EB"
81
+
82
+
83
+ [meta]
84
+ generated_by = "1.0.0"
__pycache__/app.cpython-312.pyc ADDED
Binary file (5.67 kB). View file
 
app.py CHANGED
@@ -11,25 +11,20 @@ from langchain_core.prompts import PromptTemplate
11
  from langchain.schema.output_parser import StrOutputParser
12
  from langchain.schema.runnable import RunnablePassthrough
13
  from langchain.schema.runnable.config import RunnableConfig
14
-
15
  # GLOBAL SCOPE - ENTIRE APPLICATION HAS ACCESS TO VALUES SET IN THIS SCOPE #
16
  # ---- ENV VARIABLES ---- #
17
  """
18
  This function will load our environment file (.env) if it is present.
19
-
20
  NOTE: Make sure that .env is in your .gitignore file - it is by default, but please ensure it remains there.
21
  """
22
  load_dotenv()
23
-
24
  """
25
  We will load our environment variables here.
26
  """
27
  HF_LLM_ENDPOINT = os.environ["HF_LLM_ENDPOINT"]
28
  HF_EMBED_ENDPOINT = os.environ["HF_EMBED_ENDPOINT"]
29
- HF_TOKEN = os.environ["HF_TOKEN"]
30
-
31
  # ---- GLOBAL DECLARATIONS ---- #
32
-
33
  # -- RETRIEVAL -- #
34
  """
35
  1. Load Documents from Text File
@@ -39,16 +34,17 @@ HF_TOKEN = os.environ["HF_TOKEN"]
39
  """
40
  ### 1. CREATE TEXT LOADER AND LOAD DOCUMENTS
41
  ### NOTE: PAY ATTENTION TO THE PATH THEY ARE IN.
42
- text_loader =
43
- documents =
44
-
45
  ### 2. CREATE TEXT SPLITTER AND SPLIT DOCUMENTS
46
- text_splitter =
47
- split_documents =
48
-
49
  ### 3. LOAD HUGGINGFACE EMBEDDINGS
50
- hf_embeddings =
51
-
 
 
 
52
  if os.path.exists("./data/vectorstore"):
53
  vectorstore = FAISS.load_local(
54
  "./data/vectorstore",
@@ -62,71 +58,79 @@ else:
62
  os.makedirs("./data/vectorstore", exist_ok=True)
63
  ### 4. INDEX FILES
64
  ### NOTE: REMEMBER TO BATCH THE DOCUMENTS WITH MAXIMUM BATCH SIZE = 32
65
-
 
 
 
 
 
66
  hf_retriever = vectorstore.as_retriever()
67
-
68
  # -- AUGMENTED -- #
69
  """
70
  1. Define a String Template
71
  2. Create a Prompt Template from the String Template
72
  """
73
  ### 1. DEFINE STRING TEMPLATE
74
- RAG_PROMPT_TEMPLATE =
75
-
 
 
 
 
 
 
 
 
76
  ### 2. CREATE PROMPT TEMPLATE
77
- rag_prompt =
78
-
79
  # -- GENERATION -- #
80
  """
81
  1. Create a HuggingFaceEndpoint for the LLM
82
  """
83
  ### 1. CREATE HUGGINGFACE ENDPOINT FOR LLM
84
- hf_llm =
85
-
 
 
 
 
 
 
 
 
86
  @cl.author_rename
87
  def rename(original_author: str):
88
  """
89
  This function can be used to rename the 'author' of a message.
90
-
91
  In this case, we're overriding the 'Assistant' author to be 'Paul Graham Essay Bot'.
92
  """
93
  rename_dict = {
94
  "Assistant" : "Paul Graham Essay Bot"
95
  }
96
  return rename_dict.get(original_author, original_author)
97
-
98
  @cl.on_chat_start
99
  async def start_chat():
100
  """
101
  This function will be called at the start of every user session.
102
-
103
  We will build our LCEL RAG chain here, and store it in the user session.
104
-
105
  The user session is a dictionary that is unique to each user session, and is stored in the memory of the server.
106
  """
107
-
108
  ### BUILD LCEL RAG CHAIN THAT ONLY RETURNS TEXT
109
- lcel_rag_chain =
110
-
111
  cl.user_session.set("lcel_rag_chain", lcel_rag_chain)
112
-
113
  @cl.on_message
114
  async def main(message: cl.Message):
115
  """
116
  This function will be called every time a message is received from a session.
117
-
118
  We will use the LCEL RAG chain to generate a response to the user query.
119
-
120
  The LCEL RAG chain is stored in the user session, and is unique to each user session - this is why we can access it here.
121
  """
122
  lcel_rag_chain = cl.user_session.get("lcel_rag_chain")
123
-
124
  msg = cl.Message(content="")
125
-
126
  async for chunk in lcel_rag_chain.astream(
127
  {"query": message.content},
128
  config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
129
  ):
130
  await msg.stream_token(chunk)
131
-
132
  await msg.send()
 
11
  from langchain.schema.output_parser import StrOutputParser
12
  from langchain.schema.runnable import RunnablePassthrough
13
  from langchain.schema.runnable.config import RunnableConfig
 
14
  # GLOBAL SCOPE - ENTIRE APPLICATION HAS ACCESS TO VALUES SET IN THIS SCOPE #
15
  # ---- ENV VARIABLES ---- #
16
  """
17
  This function will load our environment file (.env) if it is present.
 
18
  NOTE: Make sure that .env is in your .gitignore file - it is by default, but please ensure it remains there.
19
  """
20
  load_dotenv()
 
21
  """
22
  We will load our environment variables here.
23
  """
24
  HF_LLM_ENDPOINT = os.environ["HF_LLM_ENDPOINT"]
25
  HF_EMBED_ENDPOINT = os.environ["HF_EMBED_ENDPOINT"]
26
+ HF_TOKEN = os.environ.get("HF_TOKEN")
 
27
  # ---- GLOBAL DECLARATIONS ---- #
 
28
  # -- RETRIEVAL -- #
29
  """
30
  1. Load Documents from Text File
 
34
  """
35
  ### 1. CREATE TEXT LOADER AND LOAD DOCUMENTS
36
  ### NOTE: PAY ATTENTION TO THE PATH THEY ARE IN.
37
+ text_loader = TextLoader("./data/paul_graham_essays.txt")
38
+ documents = text_loader.load() # Load the documents
 
39
  ### 2. CREATE TEXT SPLITTER AND SPLIT DOCUMENTS
40
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=30)
41
+ split_documents = text_splitter.split_documents(documents)
 
42
  ### 3. LOAD HUGGINGFACE EMBEDDINGS
43
+ hf_embeddings = HuggingFaceEndpointEmbeddings(
44
+ model=HF_EMBED_ENDPOINT,
45
+ task="feature-extraction",
46
+ huggingfacehub_api_token=HF_TOKEN,
47
+ )
48
  if os.path.exists("./data/vectorstore"):
49
  vectorstore = FAISS.load_local(
50
  "./data/vectorstore",
 
58
  os.makedirs("./data/vectorstore", exist_ok=True)
59
  ### 4. INDEX FILES
60
  ### NOTE: REMEMBER TO BATCH THE DOCUMENTS WITH MAXIMUM BATCH SIZE = 32
61
+ for i in range(0, len(split_documents), 32):
62
+ if i == 0:
63
+ vectorstore = FAISS.from_documents(split_documents[i:i+32], hf_embeddings)
64
+ continue
65
+ vectorstore.add_documents(split_documents[i:i+32])
66
+ vectorstore.save_local("./data/vectorstore")
67
  hf_retriever = vectorstore.as_retriever()
 
68
  # -- AUGMENTED -- #
69
  """
70
  1. Define a String Template
71
  2. Create a Prompt Template from the String Template
72
  """
73
  ### 1. DEFINE STRING TEMPLATE
74
+ RAG_PROMPT_TEMPLATE = """\
75
+ <|start_header_id|>system<|end_header_id|>
76
+ You are a helpful assistant. You answer user questions based on provided context. If you can't answer the question with the provided context, say you don't know.<|eot_id|>
77
+ <|start_header_id|>user<|end_header_id|>
78
+ User Query:
79
+ {query}
80
+ Context:
81
+ {context}<|eot_id|>
82
+ <|start_header_id|>assistant<|end_header_id|>
83
+ """
84
  ### 2. CREATE PROMPT TEMPLATE
85
+ rag_prompt = PromptTemplate.from_template(RAG_PROMPT_TEMPLATE)
 
86
  # -- GENERATION -- #
87
  """
88
  1. Create a HuggingFaceEndpoint for the LLM
89
  """
90
  ### 1. CREATE HUGGINGFACE ENDPOINT FOR LLM
91
+ hf_llm = HuggingFaceEndpoint(
92
+ endpoint_url=HF_LLM_ENDPOINT,
93
+ max_new_tokens=512,
94
+ top_k=10,
95
+ top_p=0.95,
96
+ typical_p=0.95,
97
+ temperature=0.01,
98
+ repetition_penalty=1.03,
99
+ huggingfacehub_api_token=HF_TOKEN
100
+ )
101
  @cl.author_rename
102
  def rename(original_author: str):
103
  """
104
  This function can be used to rename the 'author' of a message.
 
105
  In this case, we're overriding the 'Assistant' author to be 'Paul Graham Essay Bot'.
106
  """
107
  rename_dict = {
108
  "Assistant" : "Paul Graham Essay Bot"
109
  }
110
  return rename_dict.get(original_author, original_author)
 
111
  @cl.on_chat_start
112
  async def start_chat():
113
  """
114
  This function will be called at the start of every user session.
 
115
  We will build our LCEL RAG chain here, and store it in the user session.
 
116
  The user session is a dictionary that is unique to each user session, and is stored in the memory of the server.
117
  """
 
118
  ### BUILD LCEL RAG CHAIN THAT ONLY RETURNS TEXT
119
+ # Diagram https://docs.google.com/presentation/d/1P9ohPhMdDr9VdXT7qgROZNY93B4jw1xaAvNH6g0AIIQ/edit?usp=sharing
120
+ lcel_rag_chain = {"context": itemgetter("query") | hf_retriever, "query": itemgetter("query")}| rag_prompt | hf_llm
121
  cl.user_session.set("lcel_rag_chain", lcel_rag_chain)
 
122
  @cl.on_message
123
  async def main(message: cl.Message):
124
  """
125
  This function will be called every time a message is received from a session.
 
126
  We will use the LCEL RAG chain to generate a response to the user query.
 
127
  The LCEL RAG chain is stored in the user session, and is unique to each user session - this is why we can access it here.
128
  """
129
  lcel_rag_chain = cl.user_session.get("lcel_rag_chain")
 
130
  msg = cl.Message(content="")
 
131
  async for chunk in lcel_rag_chain.astream(
132
  {"query": message.content},
133
  config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
134
  ):
135
  await msg.stream_token(chunk)
 
136
  await msg.send()
chainlit.md CHANGED
@@ -1 +1 @@
1
- # FILL OUT YOUR CHAINLIT MD HERE WITH A DESCRIPTION OF YOUR APPLICATION
 
1
+ # Paul Graham Essay LangChain RAG
data/vectorstore/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a86a7e679d047cd8971a596286526815f93065a9856f197b7312146545ab323
3
+ size 13102125
data/vectorstore/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b75430ec8999b62a93a194efec725ae6440be0a344d89f1196b0f5eab8ff716
3
+ size 3470911
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- chainlit==0.7.700
2
  langchain==0.2.5
3
  langchain_community==0.2.5
4
  langchain_core==0.2.9
 
1
+ chainlit==1.0.0
2
  langchain==0.2.5
3
  langchain_community==0.2.5
4
  langchain_core==0.2.9
solution_app.py DELETED
@@ -1,155 +0,0 @@
1
- import os
2
- import chainlit as cl
3
- from dotenv import load_dotenv
4
- from operator import itemgetter
5
- from langchain_huggingface import HuggingFaceEndpoint
6
- from langchain_community.document_loaders import TextLoader
7
- from langchain_text_splitters import RecursiveCharacterTextSplitter
8
- from langchain_community.vectorstores import FAISS
9
- from langchain_huggingface import HuggingFaceEndpointEmbeddings
10
- from langchain_core.prompts import PromptTemplate
11
- from langchain.schema.output_parser import StrOutputParser
12
- from langchain.schema.runnable import RunnablePassthrough
13
- from langchain.schema.runnable.config import RunnableConfig
14
-
15
- # GLOBAL SCOPE - ENTIRE APPLICATION HAS ACCESS TO VALUES SET IN THIS SCOPE #
16
- # ---- ENV VARIABLES ---- #
17
- """
18
- This function will load our environment file (.env) if it is present.
19
-
20
- NOTE: Make sure that .env is in your .gitignore file - it is by default, but please ensure it remains there.
21
- """
22
- load_dotenv()
23
-
24
- """
25
- We will load our environment variables here.
26
- """
27
- HF_LLM_ENDPOINT = os.environ["HF_LLM_ENDPOINT"]
28
- HF_EMBED_ENDPOINT = os.environ["HF_EMBED_ENDPOINT"]
29
- HF_TOKEN = os.environ["HF_TOKEN"]
30
-
31
- # ---- GLOBAL DECLARATIONS ---- #
32
-
33
- # -- RETRIEVAL -- #
34
- """
35
- 1. Load Documents from Text File
36
- 2. Split Documents into Chunks
37
- 3. Load HuggingFace Embeddings (remember to use the URL we set above)
38
- 4. Index Files if they do not exist, otherwise load the vectorstore
39
- """
40
- document_loader = TextLoader("./data/paul_graham_essays.txt")
41
- documents = document_loader.load()
42
-
43
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=30)
44
- split_documents = text_splitter.split_documents(documents)
45
-
46
- hf_embeddings = HuggingFaceEndpointEmbeddings(
47
- model=HF_EMBED_ENDPOINT,
48
- task="feature-extraction",
49
- huggingfacehub_api_token=HF_TOKEN,
50
- )
51
-
52
- if os.path.exists("./data/vectorstore"):
53
- vectorstore = FAISS.load_local(
54
- "./data/vectorstore",
55
- hf_embeddings,
56
- allow_dangerous_deserialization=True # this is necessary to load the vectorstore from disk as it's stored as a `.pkl` file.
57
- )
58
- hf_retriever = vectorstore.as_retriever()
59
- print("Loaded Vectorstore")
60
- else:
61
- print("Indexing Files")
62
- os.makedirs("./data/vectorstore", exist_ok=True)
63
- for i in range(0, len(split_documents), 32):
64
- if i == 0:
65
- vectorstore = FAISS.from_documents(split_documents[i:i+32], hf_embeddings)
66
- continue
67
- vectorstore.add_documents(split_documents[i:i+32])
68
- vectorstore.save_local("./data/vectorstore")
69
-
70
- hf_retriever = vectorstore.as_retriever()
71
-
72
- # -- AUGMENTED -- #
73
- """
74
- 1. Define a String Template
75
- 2. Create a Prompt Template from the String Template
76
- """
77
- RAG_PROMPT_TEMPLATE = """\
78
- <|start_header_id|>system<|end_header_id|>
79
- You are a helpful assistant. You answer user questions based on provided context. If you can't answer the question with the provided context, say you don't know.<|eot_id|>
80
-
81
- <|start_header_id|>user<|end_header_id|>
82
- User Query:
83
- {query}
84
-
85
- Context:
86
- {context}<|eot_id|>
87
-
88
- <|start_header_id|>assistant<|end_header_id|>
89
- """
90
-
91
- rag_prompt = PromptTemplate.from_template(RAG_PROMPT_TEMPLATE)
92
-
93
- # -- GENERATION -- #
94
- """
95
- 1. Create a HuggingFaceEndpoint for the LLM
96
- """
97
- hf_llm = HuggingFaceEndpoint(
98
- endpoint_url=HF_LLM_ENDPOINT,
99
- max_new_tokens=512,
100
- top_k=10,
101
- top_p=0.95,
102
- temperature=0.3,
103
- repetition_penalty=1.15,
104
- huggingfacehub_api_token=HF_TOKEN,
105
- )
106
-
107
- @cl.author_rename
108
- def rename(original_author: str):
109
- """
110
- This function can be used to rename the 'author' of a message.
111
-
112
- In this case, we're overriding the 'Assistant' author to be 'Paul Graham Essay Bot'.
113
- """
114
- rename_dict = {
115
- "Assistant" : "Paul Graham Essay Bot"
116
- }
117
- return rename_dict.get(original_author, original_author)
118
-
119
- @cl.on_chat_start
120
- async def start_chat():
121
- """
122
- This function will be called at the start of every user session.
123
-
124
- We will build our LCEL RAG chain here, and store it in the user session.
125
-
126
- The user session is a dictionary that is unique to each user session, and is stored in the memory of the server.
127
- """
128
-
129
- lcel_rag_chain = (
130
- {"context": itemgetter("query") | hf_retriever, "query": itemgetter("query")}
131
- | rag_prompt | hf_llm
132
- )
133
-
134
- cl.user_session.set("lcel_rag_chain", lcel_rag_chain)
135
-
136
- @cl.on_message
137
- async def main(message: cl.Message):
138
- """
139
- This function will be called every time a message is recieved from a session.
140
-
141
- We will use the LCEL RAG chain to generate a response to the user query.
142
-
143
- The LCEL RAG chain is stored in the user session, and is unique to each user session - this is why we can access it here.
144
- """
145
- lcel_rag_chain = cl.user_session.get("lcel_rag_chain")
146
-
147
- msg = cl.Message(content="")
148
-
149
- for chunk in await cl.make_async(lcel_rag_chain.stream)(
150
- {"query": message.content},
151
- config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
152
- ):
153
- await msg.stream_token(chunk)
154
-
155
- await msg.send()