Spaces:
Runtime error
Runtime error
carolinefrascasnowflake
committed on
Commit
•
85894bf
1
Parent(s):
5756334
Add safety check (#5)
Browse files- Add safety check (9837bb7990a7f6b5cc0207671fda8c1106a16644)
app.py
CHANGED
@@ -3,54 +3,58 @@ import replicate
|
|
3 |
import os
|
4 |
from transformers import AutoTokenizer
|
5 |
|
6 |
-
#
|
7 |
-
|
8 |
|
9 |
-
|
10 |
-
|
11 |
-
|
|
|
|
|
|
|
|
|
12 |
|
13 |
-
|
|
|
14 |
|
15 |
-
|
16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
|
18 |
-
|
19 |
-
st.set_page_config(page_title="Snowflake Arctic")
|
20 |
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
#st.success('API token loaded!', icon='✅')
|
26 |
-
replicate_api = st.secrets['REPLICATE_API_TOKEN']
|
27 |
-
else:
|
28 |
-
replicate_api = st.text_input('Enter Replicate API token:', type='password')
|
29 |
-
if not (replicate_api.startswith('r8_') and len(replicate_api)==40):
|
30 |
-
st.warning('Please enter your Replicate API token.', icon='⚠️')
|
31 |
-
st.markdown("**Don't have an API token?** Head over to [Replicate](https://replicate.com) to sign up for one.")
|
32 |
-
#else:
|
33 |
-
# st.success('API token loaded!', icon='✅')
|
34 |
-
|
35 |
-
os.environ['REPLICATE_API_TOKEN'] = replicate_api
|
36 |
-
st.subheader("Adjust model parameters")
|
37 |
-
temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=5.0, value=0.3, step=0.01)
|
38 |
-
top_p = st.sidebar.slider('top_p', min_value=0.01, max_value=1.0, value=0.9, step=0.01)
|
39 |
-
|
40 |
-
# Store LLM-generated responses
|
41 |
-
if "messages" not in st.session_state.keys():
|
42 |
-
st.session_state.messages = [{"role": "assistant", "content": "Hi. I'm Arctic, a new, efficient, intelligent, and truly open language model created by Snowflake AI Research. Ask me anything."}]
|
43 |
|
44 |
-
#
|
45 |
-
|
46 |
-
|
47 |
-
st.write(message["content"])
|
48 |
|
49 |
def clear_chat_history():
|
50 |
st.session_state.messages = [{"role": "assistant", "content": "Hi. I'm Arctic, a new, efficient, intelligent, and truly open language model created by Snowflake AI Research. Ask me anything."}]
|
51 |
-
st.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
|
53 |
-
|
|
|
|
|
|
|
54 |
|
55 |
@st.cache_resource(show_spinner=False)
|
56 |
def get_tokenizer():
|
@@ -59,14 +63,63 @@ def get_tokenizer():
|
|
59 |
"""
|
60 |
return AutoTokenizer.from_pretrained("huggyllama/llama-7b")
|
61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
62 |
def get_num_tokens(prompt):
|
63 |
"""Get the number of tokens in a given prompt"""
|
64 |
tokenizer = get_tokenizer()
|
65 |
tokens = tokenizer.tokenize(prompt)
|
66 |
return len(tokens)
|
67 |
|
68 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
69 |
def generate_arctic_response():
|
|
|
70 |
prompt = []
|
71 |
for dict_message in st.session_state.messages:
|
72 |
if dict_message["role"] == "user":
|
@@ -77,30 +130,29 @@ def generate_arctic_response():
|
|
77 |
prompt.append("<|im_start|>assistant")
|
78 |
prompt.append("")
|
79 |
prompt_str = "\n".join(prompt)
|
80 |
-
|
81 |
-
if get_num_tokens(prompt_str) >= 3072:
|
82 |
-
st.error("Conversation length too long. Please keep it under 3072 tokens.")
|
83 |
-
st.button('Clear chat history', on_click=clear_chat_history, key="clear_chat_history")
|
84 |
-
st.stop()
|
85 |
|
86 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
87 |
input={"prompt": prompt_str,
|
88 |
"prompt_template": r"{prompt}",
|
89 |
-
"temperature": temperature,
|
90 |
-
"top_p": top_p,
|
91 |
-
}):
|
|
|
|
|
|
|
|
|
92 |
yield str(event)
|
93 |
|
94 |
-
#
|
95 |
-
if
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
# Generate a new response if last message is not from assistant
|
101 |
-
if st.session_state.messages[-1]["role"] != "assistant":
|
102 |
-
with st.chat_message("assistant", avatar="./Snowflake_Logomark_blue.svg"):
|
103 |
-
response = generate_arctic_response()
|
104 |
-
full_response = st.write_stream(response)
|
105 |
-
message = {"role": "assistant", "content": full_response}
|
106 |
-
st.session_state.messages.append(message)
|
|
|
3 |
import os
|
4 |
from transformers import AutoTokenizer
|
5 |
|
# App title. Must run at import time, before any other Streamlit UI call.
st.set_page_config(page_title="Snowflake Arctic")
8 |
|
def main():
    """Execution starts here."""
    get_replicate_api_token()   # load Replicate credentials into the environment
    display_sidebar_ui()        # sidebar: model parameters + chat controls
    init_chat_history()         # seed st.session_state.messages on first run
    display_chat_messages()     # replay the conversation so far
    get_and_process_prompt()    # read new user input / stream the reply
|
def get_replicate_api_token():
    """Copy the Replicate API token from Streamlit secrets into the environment.

    Presumably the `replicate` client reads REPLICATE_API_TOKEN from the
    environment — confirm against the replicate client docs.
    """
    os.environ['REPLICATE_API_TOKEN'] = st.secrets['REPLICATE_API_TOKEN']
19 |
|
def display_sidebar_ui():
    """Render the sidebar: title, model parameters, chat controls, and about text."""
    with st.sidebar:
        st.title('Snowflake Arctic')
        st.subheader("Adjust model parameters")
        # Sliders store their values in st.session_state under the given keys,
        # where generate_arctic_response() can read them.
        st.slider('temperature', min_value=0.01, max_value=5.0, value=0.3,
                  step=0.01, key="temperature")
        st.slider('top_p', min_value=0.01, max_value=1.0, value=0.9, step=0.01,
                  key="top_p")

        st.button('Clear chat history', on_click=clear_chat_history)

        # NOTE(review): `st.sidebar.caption` inside a `with st.sidebar` block is
        # redundant (plain `st.caption` would suffice) — harmless, but confirm.
        st.sidebar.caption('Build your own app powered by Arctic and [enter to win](https://arctic-streamlit-hackathon.devpost.com/) $10k in prizes.')

        st.subheader("About")
        st.caption('Built by [Snowflake](https://snowflake.com/) to demonstrate [Snowflake Arctic](https://www.snowflake.com/blog/arctic-open-and-efficient-foundation-language-models-snowflake). App hosted on [Streamlit Community Cloud](https://streamlit.io/cloud). Model hosted by [Replicate](https://replicate.com/snowflake/snowflake-arctic-instruct).')

        # # # Uncomment to show debug info
        # st.subheader("Debug")
        # st.write(st.session_state)
|
|
39 |
|
def clear_chat_history():
    """Reset the conversation to the initial assistant greeting."""
    st.session_state.messages = [{"role": "assistant", "content": "Hi. I'm Arctic, a new, efficient, intelligent, and truly open language model created by Snowflake AI Research. Ask me anything."}]
    # Clear any previous abort so the chat input is re-enabled.
    st.session_state.chat_aborted = False
43 |
+
|
def init_chat_history():
    """Create a st.session_state.messages list to store chat messages"""
    if "messages" not in st.session_state:
        clear_chat_history()
        # NOTE(review): indentation reconstructed from a diff scrape — this call
        # appears to run only on first initialization, and its boolean result is
        # ignored here. Confirm both the placement and the intent.
        check_safety()
49 |
+
|
def display_chat_messages():
    """Render every message in st.session_state.messages as a chat bubble."""
    # Set assistant icon to Snowflake logo
    icons = {"assistant": "./Snowflake_Logomark_blue.svg", "user": "⛷️"}

    # Display the messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar=icons[message["role"]]):
            st.write(message["content"])
58 |
|
59 |
@st.cache_resource(show_spinner=False)
|
60 |
def get_tokenizer():
|
|
|
63 |
"""
|
64 |
return AutoTokenizer.from_pretrained("huggyllama/llama-7b")
|
65 |
|
@st.cache_resource(show_spinner=False)
def get_llamaguard_deployment():
    """Return the Replicate LlamaGuard deployment used for safety checks.

    Cached for the app's lifetime via st.cache_resource.
    """
    return replicate.deployments.get("snowflake/llamaguard")
69 |
+
|
def check_safety(disable=False) -> bool:
    """Check the current conversation against a LlamaGuard safety deployment.

    Returns False when the model's output contains "unsafe", True otherwise.
    When `disable` is truthy, skips the remote call and reports safe.

    NOTE(review): the whole st.session_state.messages list is interpolated into
    the prompt via its Python repr — presumably intentional; confirm LlamaGuard
    tolerates that format.
    """
    if disable:
        return True

    # with st.spinner("Checking prompt safety ..."):
    deployment = get_llamaguard_deployment()
    conversation_history = st.session_state.messages
    # Blocking round-trip to the LlamaGuard deployment on Replicate.
    prediction = deployment.predictions.create(
      input = { "prompt": f"[INST] Task: Check if there is unsafe content in 'Agent' messages in conversations according our safety policy with the below categories.\n\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: Violence and Hate.\nShould not\n- Help people plan or engage in violence.\n- Encourage people to engage in violence.\n- Express hateful, derogatory, or demeaning sentiments against people based on sensitive personal characteristics like their race, color, religion, national origin, sexual orientation, gender, gender identity, or disability.\n- Encourage discrimination against people based on those characteristics and should not use slurs or create insults based on those characteristics.\nCan\n- Provide information on violence and discrimination.\n- Discuss the topics of hate and violence, and can discuss historical events involving violence.\nO2: Sexual Content.\nShould not\n- Engage in sexually explicit (i.e., erotic) conversations and should not encourage people to engage in sexual activities.\nCan\n- Discuss topics related to sexuality and sexual education.\n- Provide general information regarding sexual activities.\n- Provide guidance on topics related to sexual health.\nO3: Criminal Planning.\nShould not\n- Help people plan or engage in criminal activities like theft, kidnapping, or financial crimes, especially when the person indicates that they may act on the information provided to them.\n- Encourage people to engage in criminal activities or condone criminal behavior.\nCan\n- Help people understand how crimes are committed, provided that the response is not so detailed that it could be used to provide a roadmap for future crimes.\n- Help people understand how systems work that are used to prevent crimes, provided those descriptions do not provide detailed information on how to defeat those systems.\nO4: Guns and Illegal Weapons.\nShould not\n- Assist in the planning or execution of firearm or illegal weapons crimes, especially when the person indicates that they may act on the information provided to them.\n- Encourage people to engage in firearm or illegal weapons crimes or condone such behavior.\nCan\n- Discuss firearms and the arguments for and against firearm ownership.\nO5: Regulated or Controlled Substances.\nShould not\n- Encourage or assist people in the trafficking, consumption, or use of regulated or controlled substances, including but not limited to illegal drugs, tobacco, alcohol, and cannabis.\n- Assist or encourage people to create such substances when it is illegal to do so.\nCan\n- Discuss the use of regulated or controlled substances, and can provide general information on those substances (ex: what their chemical composition is and what their effects are).\n- Provide information on how those substances are manufactured, provided that the information is not so detailed as to provide step-by-step instructions for their creation.\nO6: Self-Harm.\nShould not\n- Encourage people to harm themselves.\n- Romanticize or condone self-harm or suicide.\n- Provide information regarding the methods of suicide or self-harm.\n- Provide any information or advice, other than information regarding appropriate health resources, to people who express or imply that they wish to harm themselves.\nCan\n- Provide general information regarding self-harm and suicide (ex: statistics, the mental health challenges faced by those who take these actions).\nShould\n- Provide information regarding appropriate health resources to people who imply or express the desire to harm themselves.\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\n{conversation_history}\n\n<END CONVERSATION>\n\nProvide your safety assessment for Agent in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST]" }
    )
    prediction.wait()
    output = prediction.output

    # NOTE(review): if `output` is a list of tokens, `in` tests exact element
    # membership rather than substring match — verify the deployment's output type.
    if output is not None and "unsafe" in output:
        return False
    else:
        return True
87 |
+
|
def get_num_tokens(prompt):
    """Get the number of tokens in a given prompt"""
    # Tokenize with the cached tokenizer and count the resulting tokens.
    return len(get_tokenizer().tokenize(prompt))
93 |
|
def abort_chat(error_message: str):
    """Display an error message requiring the chat to be cleared.

    Replaces (or appends) the last assistant message with the error text,
    flags the chat as aborted, and forces a rerun of the app.

    Raises:
        ValueError: if `error_message` is empty.
    """
    # Was `assert error_message, ...` — asserts are stripped under `python -O`,
    # so validate with an explicit raise instead.
    if not error_message:
        raise ValueError("Error message must be provided.")
    error_message = f":red[{error_message}]"
    # Show the error as the assistant's message, overwriting a partial reply if present.
    if st.session_state.messages[-1]["role"] != "assistant":
        st.session_state.messages.append({"role": "assistant", "content": error_message})
    else:
        st.session_state.messages[-1]["content"] = error_message
    st.session_state.chat_aborted = True
    st.rerun()
105 |
+
|
def get_and_process_prompt():
    """Get the user prompt and process it"""
    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar="./Snowflake_Logomark_blue.svg"):
            response = generate_arctic_response()
            st.write_stream(response)

    if st.session_state.chat_aborted:
        # Chat was aborted (safety or length): disable input, offer a reset.
        st.button('Reset chat', on_click=clear_chat_history, key="clear_chat_history")
        st.chat_input(disabled=True)
    elif prompt := st.chat_input():
        # Record the user's message and rerun so the branch above streams a reply.
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.rerun()
120 |
+
|
121 |
def generate_arctic_response():
|
122 |
+
"""String generator for the Snowflake Arctic response."""
|
123 |
prompt = []
|
124 |
for dict_message in st.session_state.messages:
|
125 |
if dict_message["role"] == "user":
|
|
|
130 |
prompt.append("<|im_start|>assistant")
|
131 |
prompt.append("")
|
132 |
prompt_str = "\n".join(prompt)
|
|
|
|
|
|
|
|
|
|
|
133 |
|
134 |
+
num_tokens = get_num_tokens(prompt_str)
|
135 |
+
max_tokens = 1500
|
136 |
+
|
137 |
+
if num_tokens >= max_tokens:
|
138 |
+
abort_chat(f"Conversation length too long. Please keep it under {max_tokens} tokens.")
|
139 |
+
|
140 |
+
st.session_state.messages.append({"role": "assistant", "content": ""})
|
141 |
+
for event_index, event in enumerate(replicate.stream("snowflake/snowflake-arctic-instruct",
|
142 |
input={"prompt": prompt_str,
|
143 |
"prompt_template": r"{prompt}",
|
144 |
+
"temperature": st.session_state.temperature,
|
145 |
+
"top_p": st.session_state.top_p,
|
146 |
+
})):
|
147 |
+
if (event_index + 0) % 50 == 0:
|
148 |
+
if not check_safety():
|
149 |
+
abort_chat("I cannot answer this question.")
|
150 |
+
st.session_state.messages[-1]["content"] += str(event)
|
151 |
yield str(event)
|
152 |
|
153 |
+
# Final safety check...
|
154 |
+
if not check_safety():
|
155 |
+
abort_chat("I cannot answer this question.")
|
156 |
+
|
# Standard script entry point: run the app only when executed directly.
if __name__ == "__main__":
    main()
|
|
|
|
|
|
|
|
|
|
|
|
|