lalanikarim committed · commit d0ebe90: initial commit

Files changed:
- .gitignore +161 -0
- LICENSE +21 -0
- README.md +46 -0
- main.py +140 -0
- requirements.txt +3 -0
.gitignore
ADDED
@@ -0,0 +1,161 @@
models/**
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Karim Lalani

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
ADDED
@@ -0,0 +1,46 @@
# Streamlit + Langchain + LLama.cpp w/ Mistral + Conversational Memory

Run your own AI Chatbot locally without a GPU.

To make that possible, we use the [Mistral 7b](https://mistral.ai/news/announcing-mistral-7b/) model.
However, you can use any quantized model that is supported by [llama.cpp](https://github.com/ggerganov/llama.cpp).

This chatbot lets you define its personality, and it will respond to questions accordingly.
This example remembers the chat history, allowing you to ask follow-up questions.

# TL;DR instructions

1. Install llama-cpp-python
2. Install langchain
3. Install streamlit
4. Download Mistral from HuggingFace from TheBloke's repo: mistral-7b-instruct-v0.1.Q4_0.gguf
5. Place the model file in the `models` subfolder
6. Run streamlit

# Step by Step instructions

The setup assumes you have `python` already installed and the `venv` module available.

1. Download the code or clone the repository.
2. Inside the root folder of the repository, initialize a python virtual environment:
```bash
python -m venv venv
```
3. Activate the python environment:
```bash
source venv/bin/activate
```
4. Install the required packages (`langchain`, `llama-cpp-python`, and `streamlit`):
```bash
pip install -r requirements.txt
```
5. Create a subdirectory to place the models in:
```bash
mkdir -p models
```
6. Download the `Mistral7b` quantized model from `huggingface` from the following link (or see the command-line sketch after these steps):
[mistral-7b-instruct-v0.1.Q4_0.gguf](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_0.gguf)
7. Start `streamlit`:
```bash
streamlit run main.py
```
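Step 6 can also be scripted. A minimal sketch, assuming `curl` is installed and the command is run from the repository root; the URL is the same one linked above:

```bash
# Download the quantized Mistral 7B model into the models/ subfolder
# (-L follows HuggingFace's redirect to its CDN)
curl -L -o models/mistral-7b-instruct-v0.1.Q4_0.gguf \
  https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_0.gguf
```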
main.py
ADDED
@@ -0,0 +1,140 @@
import streamlit as st
from langchain.llms import LlamaCpp
from langchain.schema import SystemMessage
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
# from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.base import BaseCallbackHandler
import json


# StreamHandler to intercept streaming output from the LLM.
# This makes it appear that the Language Model is "typing"
# in realtime.
class StreamHandler(BaseCallbackHandler):
    def __init__(self, container, initial_text=""):
        self.container = container
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.text += token
        self.container.markdown(self.text)


@st.cache_resource
def create_chain(system_prompt):
    # A stream handler to direct streaming output on the chat screen.
    # This will need to be handled somewhat differently.
    # But it demonstrates what potential it carries.
    # stream_handler = StreamHandler(st.empty())

    # Callback manager is a way to intercept streaming output from the
    # LLM and take some action on it. Here we would give it our custom
    # stream handler to make it appear as if the LLM is typing the
    # responses in real time.
    # callback_manager = CallbackManager([stream_handler])

    llm = LlamaCpp(
        model_path="models/mistral-7b-instruct-v0.1.Q4_0.gguf",
        temperature=0,
        max_tokens=512,
        top_p=1,
        # callback_manager=callback_manager,
        verbose=False,
        streaming=True,
    )

    # Template used to structure the user input before converting it into
    # a prompt. The template first injects the personality we wish to give
    # the LLM, in the form of the system_prompt, before the actual prompt
    # from the user. Then we inject the chat history, followed by the user
    # prompt and a placeholder token for the LLM to complete.
    template = """
{}

{}

Human: {}
AI:
""".format(system_prompt, "{chat_history}", "{human_input}")

    # We create a prompt from the template so we can use it with langchain
    # prompt = ChatPromptTemplate.from_messages([
    #     SystemMessage(content=system_prompt),
    #     MessagesPlaceholder(variable_name="chat_history"),
    #     HumanMessagePromptTemplate.from_template("{human_input}")
    # ])
    prompt = PromptTemplate(input_variables=["chat_history", "human_input"], template=template)

    # Conversation buffer memory will keep track of the conversation in memory
    memory = ConversationBufferMemory(memory_key="chat_history")

    # We create an llm chain with our llm, prompt, and memory
    llm_chain = LLMChain(prompt=prompt, llm=llm, memory=memory, verbose=True)

    return llm_chain


# Set the webpage title
st.set_page_config(
    page_title="Your own Chat!"
)

# Create a header element
st.header("Your own Chat!")

# This sets the LLM's personality for each prompt.
# The initial personality provided is basic.
# Try something interesting and notice how the LLM responses are affected.
system_prompt = st.text_area(
    label="System Prompt",
    value="You are a helpful AI assistant who answers questions in short sentences.",
    key="system_prompt")

# Create llm chain to use for our chat bot.
llm_chain = create_chain(system_prompt)

# We store the conversation in the session state.
# This will be used to render the chat conversation.
# We initialize it with the first message we want to be greeted with.
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "How may I help you today?"}
    ]

if "current_response" not in st.session_state:
    st.session_state.current_response = ""

# We loop through each message in the session state and render it as
# a chat message.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# We take questions/instructions from the chat input to pass to the LLM
if user_prompt := st.chat_input("Your message here", key="user_input"):

    # Add our input to the session state
    st.session_state.messages.append(
        {"role": "user", "content": user_prompt}
    )

    # Add our input to the chat window
    with st.chat_message("user"):
        st.markdown(user_prompt)

    # Pass our input to the llm chain and capture the final response.
    # It is worth noting that the StreamHandler, if enabled, would already
    # be receiving the streaming output as the llm generates. We get our
    # response here once the llm has finished generating the complete response.
    response = llm_chain.predict(human_input=user_prompt)

    # Add the response to the session state
    st.session_state.messages.append(
        {"role": "assistant", "content": response}
    )

    # Add the response to the chat window
    with st.chat_message("assistant"):
        st.markdown(response)
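A note on the streaming pieces that `main.py` leaves commented out: because `create_chain` is cached with `@st.cache_resource`, a `StreamHandler` baked into the chain at construction time would keep pointing at the placeholder from the first run. One way around that, sketched below as a hypothetical variation (assuming LangChain 0.0.321's per-call `callbacks` argument on `Chain.predict`, and the `StreamHandler` and `llm_chain` defined above), is to create a fresh handler for each request and pass it at call time:

```python
# Hypothetical replacement for the chat-input block in main.py; assumes
# StreamHandler, llm_chain, and streamlit (st) from the file above.
if user_prompt := st.chat_input("Your message here", key="user_input"):
    st.session_state.messages.append({"role": "user", "content": user_prompt})
    with st.chat_message("user"):
        st.markdown(user_prompt)

    with st.chat_message("assistant"):
        # A fresh placeholder per request; the handler re-renders it
        # with the accumulated text on every new token.
        stream_handler = StreamHandler(st.empty())
        # Pass the handler per call instead of baking it into the cached
        # chain, so it always targets the current chat bubble.
        response = llm_chain.predict(
            human_input=user_prompt,
            callbacks=[stream_handler],
        )

    # The handler has already rendered the full text; we only persist it
    # so the conversation re-renders on the next script run.
    st.session_state.messages.append({"role": "assistant", "content": response})
```

With this variation the trailing `st.markdown(response)` from the original block becomes redundant, since the handler streams the complete answer into the placeholder as it is generated.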
requirements.txt
ADDED
@@ -0,0 +1,3 @@
langchain==0.0.321
llama_cpp_python==0.2.11
streamlit==1.27.2