Upload 6 files
- .chainlit/config.toml +78 -0
- Dockerfile +13 -0
- app.py +106 -0
- chainlit.md +14 -0
- langsmith_config.py +8 -0
- requirements.txt +3 -0
.chainlit/config.toml
ADDED
@@ -0,0 +1,78 @@
[project]
# Whether to enable telemetry (default: true). No personal data is collected.
enable_telemetry = false

# List of environment variables to be provided by each user to use the app.
user_env = []

# Duration (in seconds) during which the session is saved when the connection is lost
session_timeout = 3600

# Enable third parties caching (e.g LangChain cache)
cache = false

# Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
# follow_symlink = false

[features]
# Show the prompt playground
prompt_playground = true

# Authorize users to upload files with messages
multi_modal = true

# Allows user to use speech to text
[features.speech_to_text]
enabled = false
# See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
# language = "en-US"

[UI]
# Name of the app and chatbot.
name = "Chatbot"

# Show the readme while the conversation is empty.
show_readme_as_default = true

# Description of the app and chatbot. This is used for HTML tags.
# description = ""

# Large size content are by default collapsed for a cleaner ui
default_collapse_content = true

# The default value for the expand messages settings.
default_expand_messages = false

# Hide the chain of thought details from the user in the UI.
hide_cot = false

# Link to your github repo. This will add a github button in the UI's header.
# github = ""

# Specify a CSS file that can be used to customize the user interface.
# The CSS file can be served from the public directory or via an external link.
# custom_css = "/public/test.css"

# Override default MUI light theme. (Check theme.ts)
[UI.theme.light]
#background = "#FAFAFA"
#paper = "#FFFFFF"

[UI.theme.light.primary]
#main = "#F80061"
#dark = "#980039"
#light = "#FFE7EB"

# Override default MUI dark theme. (Check theme.ts)
[UI.theme.dark]
#background = "#FAFAFA"
#paper = "#FFFFFF"

[UI.theme.dark.primary]
#main = "#F80061"
#dark = "#980039"
#light = "#FFE7EB"

[meta]
generated_by = "0.7.501"
Dockerfile
ADDED
@@ -0,0 +1,13 @@
FROM python:3.9

# Run as a non-root user (uid 1000), as recommended for Hugging Face Spaces.
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app

COPY --chown=user . $HOME/app
RUN chown -R user:user $HOME/app
RUN chmod -R 755 $HOME/app

# Install dependencies ($HOME is expanded via ENV; a literal "~" would not be expanded by COPY).
COPY ./requirements.txt $HOME/app/requirements.txt
RUN pip install -r requirements.txt
COPY . .

CMD ["chainlit", "run", "app.py", "--port", "7860"]
app.py
ADDED
@@ -0,0 +1,106 @@
import chainlit as cl
from openai import OpenAI
from langsmith.run_helpers import traceable
from langsmith_config import setup_langsmith_config
import base64
import os

os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
model = "gpt-4-1106-preview"
model_vision = "gpt-4-vision-preview"
setup_langsmith_config()

def process_images(msg: cl.Message):
    # Processing images exclusively
    images = [file for file in msg.elements if "image" in file.mime]

    # Accessing the bytes of a specific image
    image_bytes = images[0].content  # take the first image just for demo purposes

    # we need base64 encoded image
    image_base64 = base64.b64encode(image_bytes).decode("utf-8")
    return image_base64

async def process_stream(stream, msg: cl.Message):
    for part in stream:
        if token := part.choices[0].delta.content or "":
            await msg.stream_token(token)

def handle_vision_call(msg, image_history):
    image_base64 = process_images(msg)

    if image_base64:
        # add the image to the image history
        image_history.append(
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": msg.content},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{image_base64}"
                        },
                    },
                ],
            }
        )
    stream = gpt_vision_call(image_history)
    return stream

@traceable(run_type="llm", name="gpt 4 turbo call")
async def gpt_call(message_history: list = []):
    client = OpenAI()

    stream = client.chat.completions.create(
        model=model,
        messages=message_history,
        stream=True,
    )

    return stream

@traceable(run_type="llm", name="gpt 4 turbo vision call")
def gpt_vision_call(image_history: list = []):
    client = OpenAI()

    stream = client.chat.completions.create(
        model=model_vision,
        messages=image_history,
        max_tokens=1000,
        stream=True,
    )

    return stream

@cl.on_chat_start
def start_chat():
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )
    cl.user_session.set(
        "image_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )

@cl.on_message
async def on_message(msg: cl.Message):
    message_history = cl.user_session.get("message_history")
    image_history = cl.user_session.get("image_history")

    stream_msg = cl.Message(content="")
    stream = None

    if msg.elements:
        stream = handle_vision_call(msg, image_history)
    else:
        # add the message in both to keep the coherence between the two histories
        message_history.append({"role": "user", "content": msg.content})
        image_history.append({"role": "user", "content": msg.content})

        stream = await gpt_call(message_history)

    if stream:
        await process_stream(stream, msg=stream_msg)
        # store the model reply as an assistant message in both histories
        image_history.append({"role": "assistant", "content": stream_msg.content})
        message_history.append({"role": "assistant", "content": stream_msg.content})
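For a quick sanity check of the image handling above, the same base64 data-URL payload that handle_vision_call appends to image_history can be built outside Chainlit. This is only a sketch; the file name "sample.jpg" and the question text are illustrative:

import base64

# Hypothetical local image, used only for this standalone check.
with open("sample.jpg", "rb") as f:
    image_bytes = f.read()

image_base64 = base64.b64encode(image_bytes).decode("utf-8")

# Same message shape that handle_vision_call sends to the vision model.
vision_message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this image?"},
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}},
    ],
}
print(vision_message["content"][1]["image_url"]["url"][:60])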
chainlit.md
ADDED
@@ -0,0 +1,14 @@
# Welcome to Chainlit! 🚀🤖

Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.

## Useful Links 🔗

- **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
- **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬

We can't wait to see what you create with Chainlit! Happy coding! 💻😊

## Welcome screen

To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
langsmith_config.py
ADDED
@@ -0,0 +1,8 @@
import os

def setup_langsmith_config():
    os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"  # Update with your API URL if using a hosted instance of Langsmith.
    os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")  # Update with your API key
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    project_name = "GPT-4-VISION-DEMO"  # Update with your project name
    os.environ["LANGCHAIN_PROJECT"] = project_name  # Optional: "default" is used if not set
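Note that os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY") raises a TypeError when the variable is unset, because environment values must be strings. A slightly more defensive variant of the same setup, shown here only as a sketch, fails with a clearer message instead:

import os

def setup_langsmith_config_checked():
    # Hypothetical variant of setup_langsmith_config with an explicit guard.
    api_key = os.getenv("LANGCHAIN_API_KEY")
    if api_key is None:
        raise RuntimeError("LANGCHAIN_API_KEY is not set; export it before starting the app.")
    os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
    os.environ["LANGCHAIN_API_KEY"] = api_key
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    os.environ["LANGCHAIN_PROJECT"] = "GPT-4-VISION-DEMO"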
requirements.txt
ADDED
@@ -0,0 +1,3 @@
chainlit
openai
langsmith