GianJSX commited on
Commit
fa84b58
β€’
1 Parent(s): 65ce18a

Upload 6 files

Browse files
Files changed (6) hide show
  1. .chainlit/config.toml +78 -0
  2. Dockerfile +16 -0
  3. app.py +136 -0
  4. chainlit.md +12 -0
  5. langsmith_config.py +8 -0
  6. requirements.txt +5 -0
.chainlit/config.toml ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ # Whether to enable telemetry (default: true). No personal data is collected.
3
+ enable_telemetry = false
4
+
5
+ # List of environment variables to be provided by each user to use the app.
6
+ user_env = []
7
+
8
+ # Duration (in seconds) during which the session is saved when the connection is lost
9
+ session_timeout = 3600
10
+
11
 + # Enable third-party caching (e.g. LangChain cache)
12
+ cache = false
13
+
14
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
15
+ # follow_symlink = false
16
+
17
+ [features]
18
+ # Show the prompt playground
19
+ prompt_playground = true
20
+
21
+ # Authorize users to upload files with messages
22
+ multi_modal = true
23
+
24
 + # Allow users to use speech-to-text
25
+ [features.speech_to_text]
26
+ enabled = false
27
+ # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
28
+ # language = "en-US"
29
+
30
+ [UI]
31
+ # Name of the app and chatbot.
32
+ name = "Chatbot"
33
+
34
+ # Show the readme while the conversation is empty.
35
+ show_readme_as_default = true
36
+
37
+ # Description of the app and chatbot. This is used for HTML tags.
38
+ # description = ""
39
+
40
+ # Large size content are by default collapsed for a cleaner ui
41
+ default_collapse_content = true
42
+
43
+ # The default value for the expand messages settings.
44
+ default_expand_messages = false
45
+
46
+ # Hide the chain of thought details from the user in the UI.
47
+ hide_cot = false
48
+
49
+ # Link to your github repo. This will add a github button in the UI's header.
50
+ # github = ""
51
+
52
+ # Specify a CSS file that can be used to customize the user interface.
53
+ # The CSS file can be served from the public directory or via an external link.
54
+ # custom_css = "/public/test.css"
55
+
56
+ # Override default MUI light theme. (Check theme.ts)
57
+ [UI.theme.light]
58
+ #background = "#FAFAFA"
59
+ #paper = "#FFFFFF"
60
+
61
+ [UI.theme.light.primary]
62
+ #main = "#F80061"
63
+ #dark = "#980039"
64
+ #light = "#FFE7EB"
65
+
66
+ # Override default MUI dark theme. (Check theme.ts)
67
+ [UI.theme.dark]
68
+ #background = "#FAFAFA"
69
+ #paper = "#FFFFFF"
70
+
71
+ [UI.theme.dark.primary]
72
+ #main = "#F80061"
73
+ #dark = "#980039"
74
+ #light = "#FFE7EB"
75
+
76
+
77
+ [meta]
78
+ generated_by = "0.7.400"
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.9

# Run as an unprivileged user (required by Hugging Face Spaces, which runs
# containers as uid 1000).
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app

# Install dependencies first so this layer stays cached until
# requirements.txt changes. (The original copied to `~/app`, but Docker's
# COPY does not expand `~`, which created a literal `~` directory.)
COPY --chown=user requirements.txt $HOME/app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code, already owned by the runtime user — no
# post-copy chown/chmod needed (and chown would not work as non-root anyway).
COPY --chown=user . $HOME/app

ENV ENV_FILE_PATH /.env

# Chainlit serves the app on this port.
EXPOSE 7860
CMD ["chainlit", "run", "app.py", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from typing import List

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.chroma import Chroma
from langchain.chains import (
    ConversationalRetrievalChain,
)
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.docstore.document import Document
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langsmith_config import setup_langsmith_config
import openai
import chainlit as cl

# OpenAI credentials must come from the environment; this raises KeyError at
# import time if OPENAI_API_KEY is not set.
openai.api_key = os.environ["OPENAI_API_KEY"]
# Configure LangSmith tracing environment variables once, at import time.
setup_langsmith_config()

# Splitter used to chunk the uploaded document before embedding.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)

# System prompt: instructs the model to answer from the retrieved context
# ({summaries}) and to always cite a "SOURCES" section.
system_template = """Use the following pieces of context to answer the users question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
ALWAYS return a "SOURCES" part in your answer.
The "SOURCES" part should be a reference to the source of the document from which you got your answer.

And if the user greets with greetings like Hi, hello, How are you, etc reply accordingly as well.

Example of your response should be:

The answer is foo
SOURCES: xyz


Begin!
----------------
{summaries}"""
messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
# NOTE(review): chain_type_kwargs is built here but never passed to
# ConversationalRetrievalChain.from_llm in on_chat_start — confirm whether
# the custom prompt was meant to be wired into the chain.
chain_type_kwargs = {"prompt": prompt}
49
+
50
+
51
@cl.on_chat_start
async def on_chat_start():
    """Initialize a chat session from a user-uploaded text file.

    Asks the user for a plain-text file, chunks and embeds it into an
    in-memory Chroma vector store, builds a ConversationalRetrievalChain
    with buffered chat memory, and stores the chain in the user session
    under the key "chain" for use by the message handler.
    """
    files = None

    # Re-ask until the user actually uploads a file (AskFileMessage returns
    # None on timeout). Fixed `== None` to the idiomatic `is None`.
    while files is None:
        files = await cl.AskFileMessage(
            content="Please upload a text file to begin!",
            accept=["text/plain"],
            max_size_mb=20,
            timeout=180,
        ).send()

    file = files[0]

    msg = cl.Message(
        content=f"Processing `{file.name}`...", disable_human_feedback=True
    )
    await msg.send()

    # Decode the uploaded bytes; assumes the file is UTF-8 text — a file in
    # another encoding will raise UnicodeDecodeError here.
    text = file.content.decode("utf-8")

    # Split the text into overlapping chunks for retrieval.
    texts = text_splitter.split_text(text)

    # Attach a synthetic source id to each chunk so answers can cite them.
    metadatas = [{"source": f"{i}-pl"} for i in range(len(texts))]

    # Build the Chroma vector store off the event loop — embedding every
    # chunk is blocking network I/O against the OpenAI API.
    embeddings = OpenAIEmbeddings()
    docsearch = await cl.make_async(Chroma.from_texts)(
        texts, embeddings, metadatas=metadatas
    )

    message_history = ChatMessageHistory()

    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        chat_memory=message_history,
        return_messages=True,
    )

    # Create a retrieval chain backed by the Chroma vector store.
    chain = ConversationalRetrievalChain.from_llm(
        ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True),
        chain_type="stuff",
        retriever=docsearch.as_retriever(),
        memory=memory,
        return_source_documents=True,
    )

    # Let the user know that the system is ready.
    msg.content = f"Processing `{file.name}` done. You can now ask questions!"
    await msg.update()

    cl.user_session.set("chain", chain)
109
+
110
+
111
@cl.on_message
async def main(message: cl.Message):
    """Answer a user message with the session's retrieval chain.

    Runs the ConversationalRetrievalChain stored by on_chat_start, attaches
    each retrieved source document as a chainlit Text element, and appends a
    "Sources" line — or "No sources found" when retrieval returned nothing —
    to the answer before sending it.
    """
    chain = cl.user_session.get("chain")  # type: ConversationalRetrievalChain
    cb = cl.AsyncLangchainCallbackHandler()

    res = await chain.acall(message.content, callbacks=[cb])
    answer = res["answer"]
    source_documents = res["source_documents"]  # type: List[Document]

    text_elements = []  # type: List[cl.Text]

    for source_idx, source_doc in enumerate(source_documents):
        # Create the text element referenced in the message.
        text_elements.append(
            cl.Text(content=source_doc.page_content, name=f"source_{source_idx}")
        )
    source_names = [text_el.name for text_el in text_elements]

    # Bug fix: the original nested this check inside `if source_documents:`,
    # where source_names was always non-empty, so the "No sources found"
    # branch was unreachable. Flattened so an empty retrieval reports it.
    if source_names:
        answer += f"\nSources: {', '.join(source_names)}"
    else:
        answer += "\nNo sources found"

    await cl.Message(content=answer, elements=text_elements).send()
chainlit.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Welcome to Chainlit! πŸš€πŸ€–
2
+
3
 + This is a demo project to help you get started with Chainlit. It contains a simple implementation of a Chainlit bot that can be deployed to the Hugging Face Spaces platform with Docker, using:
4
+ - Langchain
5
+ - Chainlit
6
+ - ChromaDB
7
+
8
+
9
+ ## Useful Links πŸ”—
10
+
11
+ - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) πŸ“š
12
+ - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! πŸ’¬
langsmith_config.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
def setup_langsmith_config():
    """Configure LangSmith tracing via environment variables.

    Enables LangChain v2 tracing for the project "qa-chainlit-langchain-demo"
    only when LANGCHAIN_API_KEY is present in the environment. The original
    re-assigned LANGCHAIN_API_KEY to itself — a no-op that raised KeyError
    whenever the variable was unset; now setup is skipped gracefully instead.
    """
    if not os.environ.get("LANGCHAIN_API_KEY"):
        # Tracing cannot work without an API key; leave it disabled rather
        # than crashing at import time.
        return
    # Update with your API URL if using a hosted instance of LangSmith.
    os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    project_name = "qa-chainlit-langchain-demo"  # Update with your project name
    os.environ["LANGCHAIN_PROJECT"] = project_name  # Optional: "default" is used if not set
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ chainlit
2
+ openai
3
+ tiktoken
4
+ langchain
5
+ chromadb