Joshua Sundance Bailey committed
Commit 81e48fa • 0 Parent(s)
initial commit
Browse files
- .dockerignore +11 -0
- .github/ISSUE_TEMPLATE/bug_report.md +38 -0
- .github/ISSUE_TEMPLATE/feature_request.md +17 -0
- .github/dependabot.yml +11 -0
- .github/pull_request_template.md +12 -0
- .github/workflows/check-file-size-limit.yml +14 -0
- .github/workflows/docker-hub.yml +32 -0
- .github/workflows/hf-space.yml +21 -0
- .gitignore +92 -0
- .pre-commit-config.yaml +61 -0
- Dockerfile +19 -0
- LICENSE +9 -0
- README.md +9 -0
- bumpver.toml +16 -0
- docker-compose.yml +13 -0
- requirements.txt +8 -0
- streamlit-gpt4o/app.py +168 -0
.dockerignore
ADDED
@@ -0,0 +1,11 @@
+.env
+.env-example
+.git/
+.github
+.gitignore
+.idea
+.mypy_cache
+.pre-commit-config.yaml
+.ruff_cache
+Dockerfile
+docker-compose.yml
.github/ISSUE_TEMPLATE/bug_report.md
ADDED
@@ -0,0 +1,38 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: bug
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Browser [e.g. chrome, safari]
+ - Version [e.g. 22]
+
+**Smartphone (please complete the following information):**
+ - Device: [e.g. iPhone6]
+ - OS: [e.g. iOS8.1]
+ - Browser [e.g. stock browser, safari]
+ - Version [e.g. 22]
+
+**Additional context**
+Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md
ADDED
@@ -0,0 +1,17 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: enhancement
+assignees: ''
+
+---
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
.github/dependabot.yml
ADDED
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+  - package-ecosystem: "pip" # See documentation for possible values
+    directory: "/" # Location of package manifests
+    schedule:
+      interval: "weekly"
.github/pull_request_template.md
ADDED
@@ -0,0 +1,12 @@
+Thank you for contributing!
+Before submitting this PR, please make sure:
+
+- [ ] Your code builds clean without any errors or warnings
+- [ ] Your code doesn't break anything we can't fix
+- [ ] You have added appropriate tests
+
+Please check one or more of the following to describe the nature of this PR:
+- [ ] New feature
+- [ ] Bug fix
+- [ ] Documentation
+- [ ] Other
.github/workflows/check-file-size-limit.yml
ADDED
@@ -0,0 +1,14 @@
+name: 10 MB file size limit
+on:
+  pull_request:
+    branches: [main]
+
+jobs:
+  check-file-sizes:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check large files
+        uses: ActionsDesk/lfs-warning@v2.0
+        with:
+          filesizelimit: 10485760 # this is 10MB so we can sync to HF Spaces
+          token: ${{ secrets.WORKFLOW_GIT_ACCESS_TOKEN }}
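(For reference, 10485760 bytes is 10 × 1024 × 1024, i.e. 10 MiB; the workflow flags anything larger so the repository can still sync to the Hugging Face Space, as the comment above notes.)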
.github/workflows/docker-hub.yml
ADDED
@@ -0,0 +1,32 @@
+name: Push to Docker Hub
+
+on:
+  push:
+    tags:
+      - '*.*.*'
+
+jobs:
+  build-and-push-docker:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          token: ${{ secrets.WORKFLOW_GIT_ACCESS_TOKEN }}
+
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v1
+        with:
+          username: joshuasundance
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Build Docker image
+        run: |
+          docker build \
+            --target runtime \
+            -t joshuasundance/streamlit-gpt4o:${{ github.ref_name }} \
+            -t joshuasundance/streamlit-gpt4o:latest \
+            .
+
+      - name: Push to Docker Hub
+        run: docker push -a joshuasundance/streamlit-gpt4o
.github/workflows/hf-space.yml
ADDED
@@ -0,0 +1,21 @@
+name: Push to HuggingFace Space
+
+on:
+  push:
+    branches: [main]
+  workflow_dispatch:
+
+jobs:
+  push-to-huggingface:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          token: ${{ secrets.WORKFLOW_GIT_ACCESS_TOKEN }}
+
+      - name: Push to HuggingFace Space
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: |
+          git push https://joshuasundance:$HF_TOKEN@huggingface.co/spaces/joshuasundance/streamlit-gpt4o main
.gitignore
ADDED
@@ -0,0 +1,92 @@
+*$py.class
+*.chainlit
+*.chroma
+*.cover
+*.egg
+*.egg-info/
+*.env
+.env*
+*.langchain.db
+*.log
+*.manifest
+*.mo
+*.pot
+*.py,cover
+*.py[cod]
+*.sage.py
+*.so
+*.spec
+.DS_STORE
+.Python
+.cache
+.coverage
+.coverage.*
+.dmypy.json
+.idea/
+.eggs/
+.env
+.hypothesis/
+.installed.cfg
+.ipynb_checkpoints
+.mypy_cache/
+.nox/
+.pyre/
+.pytest_cache/
+.python-version
+.ropeproject
+.ruff_cache/
+.scrapy
+.spyderproject
+.spyproject
+.tox/
+.venv
+.vscode
+.webassets-cache
+/site
+ENV/
+MANIFEST
+__pycache__
+__pycache__/
+__pypackages__/
+build/
+celerybeat-schedule
+celerybeat.pid
+coverage.xml
+credentials.json
+data/
+db.sqlite3
+db.sqlite3-journal
+develop-eggs/
+dist/
+dmypy.json
+docs/_build/
+downloads/
+eggs/
+env.bak/
+env/
+fly.toml
+htmlcov/
+instance/
+ipython_config.py
+junk/
+lib/
+lib64/
+local_settings.py
+models/*.bin
+nosetests.xml
+notebooks/scratch/
+parts/
+pip-delete-this-directory.txt
+pip-log.txt
+pip-wheel-metadata/
+profile_default/
+sdist/
+share/python-wheels/
+storage
+target/
+token.json
+var/
+venv
+venv.bak/
+venv/
+wheels/
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,61 @@
+# Don't know what this file is? See https://pre-commit.com/
+# pip install pre-commit
+# pre-commit install
+# pre-commit autoupdate
+# Apply to all files without committing:
+# pre-commit run --all-files
+# I recommend running this until you pass all checks, and then commit.
+# Fix what you need to and then let the pre-commit hooks resolve their conflicts.
+# You may need to git add -u between runs.
+exclude: "AI_CHANGELOG.md"
+repos:
+  - repo: https://github.com/charliermarsh/ruff-pre-commit
+    rev: "v0.4.4"
+    hooks:
+      - id: ruff
+        args: [--fix, --exit-non-zero-on-fix, --ignore, E501]
+  - repo: https://github.com/koalaman/shellcheck-precommit
+    rev: v0.10.0
+    hooks:
+      - id: shellcheck
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: check-ast
+      - id: check-builtin-literals
+      - id: check-merge-conflict
+      - id: check-symlinks
+      - id: check-toml
+      - id: check-xml
+      - id: debug-statements
+      - id: check-case-conflict
+      - id: check-docstring-first
+      - id: check-executables-have-shebangs
+      - id: check-json
+      # - id: check-yaml
+      - id: debug-statements
+      - id: fix-byte-order-marker
+      - id: detect-private-key
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+      - id: mixed-line-ending
+      - id: requirements-txt-fixer
+  - repo: https://github.com/psf/black
+    rev: 24.4.2
+    hooks:
+      - id: black
+  - repo: https://github.com/asottile/add-trailing-comma
+    rev: v3.1.0
+    hooks:
+      - id: add-trailing-comma
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.10.0
+    hooks:
+      - id: mypy
+        additional_dependencies:
+          - types-requests
+  - repo: https://github.com/PyCQA/bandit
+    rev: 1.7.8
+    hooks:
+      - id: bandit
+        args: ["-x", "tests/*.py", "-s", "B113"]
Dockerfile
ADDED
@@ -0,0 +1,19 @@
+FROM python:3.11-slim-bookworm
+
+RUN adduser --uid 1000 --disabled-password --gecos '' appuser
+USER 1000
+
+ENV PYTHONDONTWRITEBYTECODE=1 \
+    PYTHONUNBUFFERED=1 \
+    PATH="/home/appuser/.local/bin:$PATH"
+
+RUN pip install --user --no-cache-dir --upgrade pip
+
+COPY ./requirements.txt /home/appuser/requirements.txt
+RUN pip install --user --no-cache-dir --upgrade -r /home/appuser/requirements.txt
+
+COPY ./streamlit-gpt4o/ /home/appuser/streamlit-gpt4o/
+
+WORKDIR /home/appuser/streamlit-gpt4o
+
+CMD ["streamlit", "run", "/home/appuser/streamlit-gpt4o/app.py", "--server.port", "7860", "--server.address", "0.0.0.0"]
LICENSE
ADDED
@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) 2024 Joshua Sundance Bailey
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
README.md
ADDED
@@ -0,0 +1,9 @@
+# streamlit-gpt4o
+
+A Streamlit component that enables a rich multimodal chat interface, allowing users to input text and upload images within Streamlit applications.
+
+`[...]`
+
+# TODO
+
+- [ ] Make a README ;)
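Since the README above is still a stub, here is a minimal usage sketch of the pattern this repo follows; it assumes `multimodal_chatinput` returns a dict with `"text"` and `"images"` keys, which is how `streamlit-gpt4o/app.py` (added below) consumes it.

```python
# Illustrative sketch only; it mirrors the pattern used in streamlit-gpt4o/app.py.
import streamlit as st
from st_multimodal_chatinput import multimodal_chatinput

chat_input = multimodal_chatinput(text_color="black")  # assumed: {"text": str, "images": [url, ...]}
if chat_input:
    st.write(chat_input.get("text", ""))        # user-entered text
    for image in chat_input.get("images", []):  # assumed to be data URLs that st.image can render
        st.image(image)
```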
bumpver.toml
ADDED
@@ -0,0 +1,16 @@
+[bumpver]
+current_version = "0.0.1"
+version_pattern = "MAJOR.MINOR.PATCH[-TAG]"
+commit_message = "Bump version {old_version} -> {new_version}"
+tag_message = "{new_version}"
+tag_scope = "branch"
+pre_commit_hook = ""
+post_commit_hook = ""
+commit = true
+tag = true
+push = true
+
+[bumpver.file_patterns]
+"bumpver.toml" = [
+    'current_version = "{version}"',
+]
docker-compose.yml
ADDED
@@ -0,0 +1,13 @@
+version: '3.8'
+
+services:
+  streamlit-gpt4o:
+    build: .
+    ports:
+      - "${APP_PORT:-7860}:${APP_PORT:-7860}"
+    command: [
+      "streamlit", "run",
+      "/home/appuser/streamlit-gpt4o/app.py",
+      "--server.port", "${APP_PORT:-7860}",
+      "--server.address", "0.0.0.0"
+    ]
requirements.txt
ADDED
@@ -0,0 +1,8 @@
+https://github.com/joshuasundance-swca/st-multimodal-chatinput/releases/download/v0.1.7-experimental-unapproved-text-color/st_multimodal_chatinput-0.1.7-py3-none-any.whl
+langchain
+langchain-openai
+langsmith
+openai
+pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
+streamlit
+tiktoken
streamlit-gpt4o/app.py
ADDED
@@ -0,0 +1,168 @@
+import logging
+import os
+from datetime import datetime
+from uuid import uuid4
+
+import streamlit as st
+from langchain_community.chat_message_histories import (
+    StreamlitChatMessageHistory,
+)
+from langchain_core.messages import HumanMessage
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_core.runnables.history import RunnableWithMessageHistory
+from langchain_openai import ChatOpenAI
+from st_multimodal_chatinput import multimodal_chatinput
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+def chat_input_to_human_message(chat_input: dict) -> HumanMessage:
+    text = chat_input.get("text", "")
+    images = chat_input.get("images", [])
+    human_message = HumanMessage(
+        content=[
+            {
+                "type": "text",
+                "text": text,
+            },
+        ]
+        + [
+            {
+                "type": "image_url",
+                "image_url": {
+                    "url": image,
+                },
+            }
+            for image in images
+        ],
+    )
+    return human_message
+
+
+def render_human_contents(msg: HumanMessage) -> None:
+    for d in msg.content:
+        if d["type"] == "text":
+            st.write(d["text"])
+        elif d["type"] == "image_url":
+            st.image(d["image_url"]["url"], use_column_width=True)
+
+
+prompt = ChatPromptTemplate.from_messages(
+    [
+        (
+            "system",
+            "You are a multimodal AI chatbot having a conversation with a human. "
+            "You can accept text and images as input, but you can only respond with text. "
+            "The current time is {date_time}.",
+        ),
+        MessagesPlaceholder(variable_name="history"),
+        MessagesPlaceholder(variable_name="input"),
+    ],
+).partial(date_time=datetime.now().strftime("%B %d, %Y %H:%M:%S"))
+
+
+llm = None
+runnable = None
+with_message_history = None
+
+langsmith_api_key = None
+langsmith_project_name = None
+langsmith_client = None
+
+chat_input_dict = None
+chat_input_human_message = None
+
+history = StreamlitChatMessageHistory(key="chat_messages")
+
+if not st.session_state.get("session_id", None):
+    st.session_state.session_id = str(uuid4())
+
+top = st.container()
+bottom = st.container()
+
+with st.sidebar:
+    openai_api_key = st.text_input("OpenAI API Key", type="password")
+    use_gpt4o = st.toggle(label="`gpt-4-turbo` ⇄ `gpt-4o`", value=True)
+    model_option = "gpt-4o" if use_gpt4o else "gpt-4-turbo"
+    if openai_api_key:
+        llm = ChatOpenAI(
+            model=model_option,
+            streaming=True,
+            verbose=True,
+            openai_api_key=openai_api_key,
+        )
+        runnable = prompt | llm
+        with_message_history = RunnableWithMessageHistory(
+            runnable,
+            lambda _: history,
+            input_messages_key="input",
+            history_messages_key="history",
+        )
+
+    langsmith_api_key = st.text_input("LangSmith API Key", type="password")
+    langsmith_project_name = st.text_input(
+        "LangSmith Project Name",
+        value="streamlit-gpt4o",
+    )
+    langsmith_endpoint = st.text_input(
+        "LangSmith Endpoint",
+        value="https://api.smith.langchain.com",
+    )
+    if langsmith_api_key and langsmith_project_name:
+        os.environ["LANGCHAIN_API_KEY"] = langsmith_api_key
+        os.environ["LANGCHAIN_PROJECT"] = langsmith_project_name
+        os.environ["LANGCHAIN_ENDPOINT"] = langsmith_endpoint
+        os.environ["LANGCHAIN_TRACING_V2"] = "true"
+
+    else:
+        for key in (
+            "LANGCHAIN_API_KEY",
+            "LANGCHAIN_PROJECT",
+            "LANGCHAIN_ENDPOINT",
+            "LANGCHAIN_TRACING_V2",
+        ):
+            os.environ.pop(key, None)
+
+    st.markdown(
+        f"## Current session ID\n`{st.session_state.get('session_id', '<none>')}`",
+    )
+    if st.button("Clear message history"):
+        history.clear()
+        st.session_state.session_id = None
+        st.rerun()
+
+
+if not with_message_history:
+    st.error("Please enter an OpenAI API key in the sidebar.")
+
+else:
+    with bottom:
+        chat_input_dict = multimodal_chatinput(text_color="black")
+        if chat_input_dict:
+            chat_input_human_message = chat_input_to_human_message(chat_input_dict)
+
+    with top:
+        for msg in history.messages:
+            if msg.type.lower() in ("user", "human"):
+                with st.chat_message("human"):
+                    render_human_contents(msg)
+            elif msg.type.lower() in ("ai", "assistant", "aimessagechunk"):
+                with st.chat_message("ai"):
+                    st.write(msg.content)
+
+        if chat_input_human_message:
+
+            with st.chat_message("human"):
+                render_human_contents(chat_input_human_message)
+
+            with st.chat_message("ai"):
+                st.write_stream(
+                    with_message_history.stream(
+                        {"input": [chat_input_human_message]},
+                        {
+                            "configurable": {"session_id": st.session_state.session_id},
+                        },
+                    ),
+                )
+
+            chat_input_human_message = None
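For readers skimming the diff: `chat_input_to_human_message` above maps the chat-input dict onto OpenAI-style multimodal content blocks. A rough worked example follows; the payload values are made up for illustration.

```python
# Hypothetical payload from multimodal_chatinput; the data URL is a placeholder.
chat_input = {
    "text": "What is in this picture?",
    "images": ["data:image/jpeg;base64,..."],
}

# chat_input_to_human_message(chat_input) builds a HumanMessage whose .content is:
# [
#     {"type": "text", "text": "What is in this picture?"},
#     {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}},
# ]
# which ChatOpenAI (gpt-4o or gpt-4-turbo) accepts as a single multimodal user turn.
```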