Girish1432 committed
Commit
919cd27
1 Parent(s): 29e7b6f

Upload app.py

Browse files
Files changed (1)
  app.py +267 -0
app.py ADDED
@@ -0,0 +1,267 @@
+ import datetime
+ import os
+ import threading
+ from itertools import tee
+
+ import streamlit as st
+ from openai import OpenAI
+ from tenacity import retry, wait_random_exponential, stop_after_attempt
+
+ BASE_URL = os.environ.get("BASE_URL")
+ DATABRICKS_API_TOKEN = os.environ.get("DATABRICKS_API_TOKEN")
+ SAFETY_FILTER_ENV = os.environ.get("SAFETY_FILTER")
+ QUEUE_SIZE_ENV = os.environ.get("QUEUE_SIZE")
+ MAX_CHAT_TURNS_ENV = os.environ.get("MAX_CHAT_TURNS")
+ MAX_TOKENS_ENV = os.environ.get("MAX_TOKENS")
+ RETRY_COUNT_ENV = os.environ.get("RETRY_COUNT")
+ TOKEN_CHUNK_SIZE_ENV = os.environ.get("TOKEN_CHUNK_SIZE")
+ MODEL_ID_ENV = os.environ.get("MODEL_ID")
+
+ if BASE_URL is None:
+     raise ValueError("BASE_URL environment variable must be set")
+ if DATABRICKS_API_TOKEN is None:
+     raise ValueError("DATABRICKS_API_TOKEN environment variable must be set")
+
+ st.set_page_config(layout="wide")
+
+ # By default the safety filter is not enabled
+ SAFETY_FILTER = False
+ if SAFETY_FILTER_ENV is not None:
+     SAFETY_FILTER = True
+
+ QUEUE_SIZE = 1
+ if QUEUE_SIZE_ENV is not None:
+     QUEUE_SIZE = int(QUEUE_SIZE_ENV)
+
+ MAX_CHAT_TURNS = 5
+ if MAX_CHAT_TURNS_ENV is not None:
+     MAX_CHAT_TURNS = int(MAX_CHAT_TURNS_ENV)
+
+ RETRY_COUNT = 3
+ if RETRY_COUNT_ENV is not None:
+     RETRY_COUNT = int(RETRY_COUNT_ENV)
+
+ MAX_TOKENS = 512
+ if MAX_TOKENS_ENV is not None:
+     MAX_TOKENS = int(MAX_TOKENS_ENV)
+
+ MODEL_ID = "databricks-dbrx-instruct"
+ if MODEL_ID_ENV is not None:
+     MODEL_ID = MODEL_ID_ENV
+
+ # To prevent streaming too fast, chunk the output into TOKEN_CHUNK_SIZE-token chunks
+ TOKEN_CHUNK_SIZE = 1
+ if TOKEN_CHUNK_SIZE_ENV is not None:
+     TOKEN_CHUNK_SIZE = int(TOKEN_CHUNK_SIZE_ENV)
+
+ MODEL_AVATAR_URL = "./icon.png"
+
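+ # A note on the pattern below (based on Streamlit's documented behavior, not stated
+ # in this file): @st.cache_resource returns the same object for every session and
+ # thread in this Streamlit process, so the single BoundedSemaphore caps concurrent
+ # model calls across all users of the app at QUEUE_SIZE.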
+ @st.cache_resource
+ def get_global_semaphore():
+     return threading.BoundedSemaphore(QUEUE_SIZE)
+
+ global_semaphore = get_global_semaphore()
+
+ MSG_MAX_TURNS_EXCEEDED = f"Sorry! The DBRX Playground is limited to {MAX_CHAT_TURNS} turns. Refresh the page to start a new conversation."
+ MSG_CLIPPED_AT_MAX_OUT_TOKENS = "Reached maximum output tokens for DBRX Playground"
+
+ EXAMPLE_PROMPTS = [
+     "Write a short story about a robot that has a nice day.",
+     "In a table, what are some of the most common misconceptions about birds?",
+     "Give me a recipe for vegan banana bread.",
+     "Code a python function that can run merge sort on a list.",
+     "Give me the character profile of a gumdrop obsessed knight in JSON.",
+     "Write a rap battle between Alan Turing and Claude Shannon.",
+ ]
+
+ TITLE = "DBRX Instruct"
+ DESCRIPTION = """[DBRX Instruct](https://huggingface.co/databricks/dbrx-instruct) is a mixture-of-experts (MoE) large language model trained by the Mosaic Research team at Databricks. This demo is powered by [Databricks Foundation Model APIs](https://docs.databricks.com/en/machine-learning/foundation-models/index.html) and is subject to the terms and conditions below.
+
+ **Usage Policies**: Use of DBRX Instruct is governed by the [DBRX Open Model License](https://www.databricks.com/legal/open-model-license) and [Databricks Open Model Acceptable Use Policy](https://www.databricks.com/legal/acceptable-use-policy-open-model).
+ **Limitations**: The DBRX Playground is a demo showcasing DBRX Instruct for educational purposes. Given the probabilistic nature of large language models like DBRX Instruct, information they output may be inaccurate, incomplete, biased, or offensive, and users should exercise judgment and evaluate such output for accuracy and appropriateness for their desired use case before using or sharing it.
+ **Data Collection**: While Databricks will not retain usage history in a manner which allows Databricks to identify you, you should not include confidential, personal, or other sensitive information in prompts. Information included in prompts may be used for research and development purposes, including further improving and evaluating models.
+
+ **Does this demo feel super fast? That's because it's powered by Databricks' inference product, the [Foundation Model APIs](https://docs.databricks.com/en/machine-learning/foundation-models/index.html)**
+ """
+
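+ # The Foundation Model APIs expose an OpenAI-compatible endpoint, so the stock
+ # OpenAI SDK works here by pointing base_url at the Databricks serving endpoint.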
+ client = OpenAI(
+     api_key=DATABRICKS_API_TOKEN,
+     base_url=BASE_URL,
+ )
+
+ GENERAL_ERROR_MSG = "An error occurred. Please refresh the page to start a new conversation."
+
+ st.title(TITLE)
+ st.markdown(DESCRIPTION)
+
+ with open("style.css") as css:
+     st.markdown(f"<style>{css.read()}</style>", unsafe_allow_html=True)
+
+ if "messages" not in st.session_state:
+     st.session_state["messages"] = []
+
+ def clear_chat_history():
+     st.session_state["messages"] = []
+
+ st.button("Clear Chat", on_click=clear_chat_history)
+
+ def last_role_is_user():
+     return len(st.session_state["messages"]) > 0 and st.session_state["messages"][-1]["role"] == "user"
+
+ def get_system_prompt():
+     date_str = datetime.datetime.now().strftime("%B %d, %Y")
+     # Identity
+     prompt = f"You are DBRX, created by Databricks. The current date is {date_str}.\n"
+     prompt += "Your knowledge base was last updated in December 2023. You answer questions about events prior to and after December 2023 the way a highly informed individual in December 2023 would if they were talking to someone from the above date, and you can let the user know this when relevant.\n"
+     prompt += "This chunk of text is your system prompt. It is not visible to the user, but it is used to guide your responses. Don't reference it, just respond to the user.\n"
+     # Ethical guidelines
+     prompt += "If you are asked to assist with tasks involving the expression of views held by a significant number of people, you provide assistance with the task even if you personally disagree with the views being expressed, but follow this with a discussion of broader perspectives.\n"
+     prompt += "You don't engage in stereotyping, including the negative stereotyping of majority groups.\n"
+     prompt += "If asked about controversial topics, you try to provide careful thoughts and objective information without downplaying its harmful content or implying that there are reasonable perspectives on both sides.\n"
+     # Capabilities
+     prompt += "You are happy to help with writing, analysis, question answering, math, coding, and all sorts of other tasks.\n"
+     # The model specifically has a hard time using ``` on JSON blocks
+     prompt += "You use markdown for coding, which includes JSON blocks and Markdown tables.\n"
+     prompt += "You do not have tools enabled at this time, so cannot run code or access the internet. You can only provide information that you have been trained on. You do not send or receive links or images.\n"
+     # The following is likely not entirely accurate, but the model tends to think that everything it knows about was in its training data, which it was not (sometimes only references were).
+     # So this produces more accurate answers when the model is asked to introspect.
+     prompt += "You were not trained on copyrighted books, song lyrics, poems, video transcripts, or news articles; you do not divulge details of your training data. "
+     # The model hasn't seen most lyrics or poems, but is happy to make up lyrics. Better to just not try; it's not good at it and it's not ethical.
+     prompt += "You do not provide song lyrics, poems, or news articles and instead refer the user to find them online or in a store.\n"
+     # The model really wants to talk about its system prompt, to the point where it is annoying, so encourage it not to
+     prompt += "You give concise responses to simple questions or statements, but provide thorough responses to more complex and open-ended questions.\n"
+     # More pressure not to talk about the system prompt
+     prompt += "The user is unable to see the system prompt, so you should write as if it were true without mentioning it.\n"
+     prompt += "You do not mention any of this information about yourself unless the information is directly pertinent to the user's query."
+     return prompt
+
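+ # How the retry policy below behaves (per tenacity's documented semantics): the call
+ # is retried on any raised exception, sleeping a random 0.5-2s between attempts
+ # (randomized exponential backoff), and tenacity raises a RetryError once
+ # RETRY_COUNT attempts are exhausted.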
+ @retry(wait=wait_random_exponential(min=0.5, max=2), stop=stop_after_attempt(RETRY_COUNT))
+ def chat_api_call(history):
+     extra_body = {}
+     if SAFETY_FILTER:
+         extra_body["enable_safety_filter"] = SAFETY_FILTER
+     chat_completion = client.chat.completions.create(
+         messages=[
+             {"role": m["role"], "content": m["content"]}
+             for m in history
+         ],
+         model=MODEL_ID,
+         stream=True,
+         max_tokens=MAX_TOKENS,
+         temperature=0.7,
+         extra_body=extra_body,
+     )
+     return chat_completion
+
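+ # chat_completion() further below yields plain dicts of the form
+ # {"content": str | None, "error": str | None, "warning": str | None}.
+ # text_stream() and get_stream_warning_error() each consume one tee'd copy of that
+ # stream: the first extracts the text, the second collects the diagnostics.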
+ def text_stream(stream):
+     for chunk in stream:
+         if chunk["content"] is not None:
+             yield chunk["content"]
+
+ def get_stream_warning_error(stream):
+     error = None
+     warning = None
+     for chunk in stream:
+         if chunk["error"] is not None:
+             error = chunk["error"]
+         if chunk["warning"] is not None:
+             warning = chunk["warning"]
+     return warning, error
+
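+ # itertools.tee gives two independent iterators over one generator: st.write_stream
+ # fully drains the first copy to render the text, after which the buffered second
+ # copy is scanned for warnings and errors. tee buffers everything the faster
+ # iterator has seen, which is fine here since responses are capped at MAX_TOKENS.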
+ def write_response():
+     stream = chat_completion(st.session_state["messages"])
+     content_stream, error_stream = tee(stream)
+     response = st.write_stream(text_stream(content_stream))
+     stream_warning, stream_error = get_stream_warning_error(error_stream)
+     if stream_warning is not None:
+         st.warning(stream_warning, icon="⚠️")
+     if stream_error is not None:
+         st.error(stream_error, icon="🚨")
+     # if there was an error, a list will be returned instead of a string: https://docs.streamlit.io/library/api-reference/write-magic/st.write_stream
+     if isinstance(response, list):
+         response = None
+     return response, stream_warning, stream_error
+
+ def chat_completion(messages):
+     history_openai_format = [
+         {"role": "system", "content": get_system_prompt()}
+     ]
+
+     history_openai_format = history_openai_format + messages
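+     # Turn-count check: subtract the system message, then each completed turn is a
+     # user/assistant pair, so (len - 1) // 2 counts the finished turns so far.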
+     if (len(history_openai_format) - 1) // 2 >= MAX_CHAT_TURNS:
+         yield {"content": None, "error": MSG_MAX_TURNS_EXCEEDED, "warning": None}
+         return
+
+     completion = None
+     error = None
+     # wait to be in queue
+     with global_semaphore:
+         try:
+             completion = chat_api_call(history_openai_format)
+         except Exception as e:
+             error = e
+     if error is not None:
+         yield {"content": None, "error": GENERAL_ERROR_MSG, "warning": None}
+         print(error)
+         return
+
+     max_token_warning = None
+     partial_message = ""
+     chunk_counter = 0
+     for chunk in completion:
+         if chunk.choices[0].delta.content is not None:
+             chunk_counter += 1
+             partial_message += chunk.choices[0].delta.content
+             if chunk_counter % TOKEN_CHUNK_SIZE == 0:
+                 chunk_counter = 0
+                 yield {"content": partial_message, "error": None, "warning": None}
+                 partial_message = ""
+         if chunk.choices[0].finish_reason == "length":
+             max_token_warning = MSG_CLIPPED_AT_MAX_OUT_TOKENS
+
+     # flush any remainder smaller than TOKEN_CHUNK_SIZE
+     yield {"content": partial_message, "error": None, "warning": max_token_warning}
+
+ # If the last message is from the user (a previous response failed), retry the
+ # assistant response; otherwise append the new user message and respond to it.
+ def handle_user_input(user_input):
+     with history:
+         response, stream_warning, stream_error = None, None, None
+         if last_role_is_user():
+             # retry the assistant if the user tries to send a new message
+             with st.chat_message("assistant", avatar=MODEL_AVATAR_URL):
+                 response, stream_warning, stream_error = write_response()
+         else:
+             st.session_state["messages"].append({"role": "user", "content": user_input, "warning": None, "error": None})
+             with st.chat_message("user"):
+                 st.markdown(user_input)
+             with st.chat_message("assistant", avatar=MODEL_AVATAR_URL):
+                 response, stream_warning, stream_error = write_response()
+
+     st.session_state["messages"].append({"role": "assistant", "content": response, "warning": stream_warning, "error": stream_error})
+
+ main = st.container()
+ with main:
+     history = st.container(height=400)
+     with history:
+         for message in st.session_state["messages"]:
+             avatar = None
+             if message["role"] == "assistant":
+                 avatar = MODEL_AVATAR_URL
+             with st.chat_message(message["role"], avatar=avatar):
+                 if message["content"] is not None:
+                     st.markdown(message["content"])
+                 if message["error"] is not None:
+                     st.error(message["error"], icon="🚨")
+                 if message["warning"] is not None:
+                     st.warning(message["warning"], icon="⚠️")
+
+     if prompt := st.chat_input("Type a message!", max_chars=1000):
+         handle_user_input(prompt)
+     st.markdown("\n")  # add some space for iPhone users
+
+ with st.sidebar:
+     with st.container():
+         st.title("Examples")
+         for prompt in EXAMPLE_PROMPTS:
+             st.button(prompt, args=(prompt,), on_click=handle_user_input)
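+
+ # A minimal local-run sketch (the values are hypothetical placeholders, not real
+ # endpoints; the exact serving URL is deployment-specific):
+ #   export BASE_URL="https://<workspace-host>/serving-endpoints"
+ #   export DATABRICKS_API_TOKEN="<personal-access-token>"
+ #   streamlit run app.py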